diff --git a/Makefile b/Makefile index 99052029d26..1fd694b2ebd 100644 --- a/Makefile +++ b/Makefile @@ -8,6 +8,7 @@ clean-test: go clean -testcache ./... clean: clean-test + go clean -cache ./... go clean ./... test: clean-test diff --git a/api/address/routes.go b/api/address/routes.go index d9a9cbc12ab..98f8ed93af1 100644 --- a/api/address/routes.go +++ b/api/address/routes.go @@ -71,7 +71,7 @@ func GetBalance(c *gin.Context) { return } - c.JSON(http.StatusOK, gin.H{"balance": balance}) + c.JSON(http.StatusOK, gin.H{"balance": balance.String()}) } func accountResponseFromBaseAccount(address string, account *state.Account) accountResponse { diff --git a/api/address/routes_test.go b/api/address/routes_test.go index 41f19f4918b..19df15789be 100644 --- a/api/address/routes_test.go +++ b/api/address/routes_test.go @@ -29,12 +29,12 @@ type GeneralResponse struct { //addressResponse structure type addressResponse struct { GeneralResponse - Balance *big.Int `json:"balance"` + Balance string `json:"balance"` } func NewAddressResponse() *addressResponse { return &addressResponse{ - Balance: big.NewInt(0), + Balance: "0", } } @@ -84,7 +84,10 @@ func TestGetBalance_WithCorrectAddressShouldNotReturnError(t *testing.T) { addressResponse := NewAddressResponse() loadResponse(resp.Body, &addressResponse) assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, amount, addressResponse.Balance) + + balanceResponse, ok := big.NewInt(0).SetString(addressResponse.Balance, 10) + assert.True(t, ok) + assert.Equal(t, amount, balanceResponse) assert.Equal(t, "", addressResponse.Error) } @@ -106,7 +109,10 @@ func TestGetBalance_WithWrongAddressShouldReturnZero(t *testing.T) { addressResponse := NewAddressResponse() loadResponse(resp.Body, &addressResponse) assert.Equal(t, http.StatusOK, resp.Code) - assert.Equal(t, big.NewInt(0), addressResponse.Balance) + + balanceResponse, ok := big.NewInt(0).SetString(addressResponse.Balance, 10) + assert.True(t, ok) + assert.Equal(t, big.NewInt(0), balanceResponse) assert.Equal(t, "", addressResponse.Error) } @@ -150,7 +156,10 @@ func TestGetBalance_WithEmptyAddressShouldReturnZeroAndError(t *testing.T) { addressResponse := NewAddressResponse() loadResponse(resp.Body, &addressResponse) assert.Equal(t, http.StatusBadRequest, resp.Code) - assert.Equal(t, big.NewInt(0), addressResponse.Balance) + + balanceResponse, ok := big.NewInt(0).SetString(addressResponse.Balance, 10) + assert.True(t, ok) + assert.Equal(t, big.NewInt(0), balanceResponse) assert.NotEmpty(t, addressResponse.Error) assert.True(t, strings.Contains(addressResponse.Error, fmt.Sprintf("%s: %s", errors2.ErrGetBalance.Error(), errors2.ErrEmptyAddress.Error()), diff --git a/api/api.go b/api/api.go index 419ebece8aa..297d9685389 100644 --- a/api/api.go +++ b/api/api.go @@ -1,7 +1,6 @@ package api import ( - "bytes" "net/http" "reflect" @@ -18,9 +17,7 @@ import ( "github.com/gin-contrib/pprof" "github.com/gin-gonic/gin" "github.com/gin-gonic/gin/binding" - "github.com/gin-gonic/gin/json" "github.com/gorilla/websocket" - "github.com/prometheus/client_golang/prometheus/promhttp" "gopkg.in/go-playground/validator.v8" ) @@ -31,19 +28,11 @@ type validatorInput struct { Validator validator.Func } -type prometheus struct { - NodePort string - NetworkID string -} - // MainApiHandler interface defines methods that can be used from `elrondFacade` context variable type MainApiHandler interface { RestApiInterface() string RestAPIServerDebugMode() bool PprofEnabled() bool - PrometheusMonitoring() bool - PrometheusJoinURL() string 
- PrometheusNetworkID() string IsInterfaceNil() bool } @@ -84,43 +73,9 @@ func Start(elrondFacade MainApiHandler) error { registerRoutes(ws, elrondFacade) - if elrondFacade.PrometheusMonitoring() { - err = joinMonitoringSystem(elrondFacade) - if err != nil { - return err - } - } - return ws.Run(elrondFacade.RestApiInterface()) } -func joinMonitoringSystem(elrondFacade MainApiHandler) error { - prometheusJoinUrl := elrondFacade.PrometheusJoinURL() - structToSend := prometheus{ - NodePort: elrondFacade.RestApiInterface(), - NetworkID: elrondFacade.PrometheusNetworkID(), - } - - jsonValue, err := json.Marshal(structToSend) - if err != nil { - return err - } - - req, err := http.NewRequest("POST", prometheusJoinUrl, bytes.NewBuffer(jsonValue)) - if err != nil { - return err - } - - client := &http.Client{} - resp, err := client.Do(req) - if err != nil { - return err - } - - err = resp.Body.Close() - return err -} - func registerRoutes(ws *gin.Engine, elrondFacade middleware.ElrondHandler) { nodeRoutes := ws.Group("/node") nodeRoutes.Use(middleware.WithElrondFacade(elrondFacade)) @@ -143,11 +98,7 @@ func registerRoutes(ws *gin.Engine, elrondFacade middleware.ElrondHandler) { valStats.Routes(validatorRoutes) apiHandler, ok := elrondFacade.(MainApiHandler) - if ok && apiHandler.PrometheusMonitoring() { - nodeRoutes.GET("/metrics", gin.WrapH(promhttp.Handler())) - } - - if apiHandler.PprofEnabled() { + if ok && apiHandler.PprofEnabled() { pprof.Register(ws) } diff --git a/api/mock/facade.go b/api/mock/facade.go index b040a63a782..c637c379fc9 100644 --- a/api/mock/facade.go +++ b/api/mock/facade.go @@ -25,8 +25,8 @@ type Facade struct { GetAccountHandler func(address string) (*state.Account, error) GenerateTransactionHandler func(sender string, receiver string, value *big.Int, code string) (*transaction.Transaction, error) GetTransactionHandler func(hash string) (*transaction.Transaction, error) - SendTransactionHandler func(nonce uint64, sender string, receiver string, value string, gasPrice uint64, gasLimit uint64, code string, signature []byte) (string, error) - CreateTransactionHandler func(nonce uint64, value string, receiverHex string, senderHex string, gasPrice uint64, gasLimit uint64, data string, signatureHex string, challenge string) (*transaction.Transaction, error) + SendTransactionHandler func(nonce uint64, sender string, receiver string, value string, gasPrice uint64, gasLimit uint64, data []byte, signature []byte) (string, error) + CreateTransactionHandler func(nonce uint64, value string, receiverHex string, senderHex string, gasPrice uint64, gasLimit uint64, data []byte, signatureHex string) (*transaction.Transaction, error) SendBulkTransactionsHandler func(txs []*transaction.Transaction) (uint64, error) ExecuteSCQueryHandler func(query *process.SCQuery) (*vmcommon.VMOutput, error) StatusMetricsHandler func() external.StatusMetricsHandler @@ -96,12 +96,11 @@ func (f *Facade) CreateTransaction( senderHex string, gasPrice uint64, gasLimit uint64, - data string, + data []byte, signatureHex string, - challenge string, ) (*transaction.Transaction, error) { - return f.CreateTransactionHandler(nonce, value, receiverHex, senderHex, gasPrice, gasLimit, data, signatureHex, challenge) + return f.CreateTransactionHandler(nonce, value, receiverHex, senderHex, gasPrice, gasLimit, data, signatureHex) } // GetTransaction is the mock implementation of a handler's GetTransaction method @@ -110,8 +109,8 @@ func (f *Facade) GetTransaction(hash string) (*transaction.Transaction, error) { } // 
SendTransaction is the mock implementation of a handler's SendTransaction method -func (f *Facade) SendTransaction(nonce uint64, sender string, receiver string, value string, gasPrice uint64, gasLimit uint64, code string, signature []byte) (string, error) { - return f.SendTransactionHandler(nonce, sender, receiver, value, gasPrice, gasLimit, code, signature) +func (f *Facade) SendTransaction(nonce uint64, sender string, receiver string, value string, gasPrice uint64, gasLimit uint64, data []byte, signature []byte) (string, error) { + return f.SendTransactionHandler(nonce, sender, receiver, value, gasPrice, gasLimit, data, signature) } // SendBulkTransactions is the mock implementation of a handler's SendBulkTransactions method diff --git a/api/node/routes.go b/api/node/routes.go index 67d11583cf5..8f7a4964e90 100644 --- a/api/node/routes.go +++ b/api/node/routes.go @@ -3,7 +3,6 @@ package node import ( "math/big" "net/http" - "net/url" "github.com/ElrondNetwork/elrond-go/api/errors" "github.com/ElrondNetwork/elrond-go/core/statistics" @@ -16,7 +15,6 @@ import ( type FacadeHandler interface { IsNodeRunning() bool StartNode() error - GetCurrentPublicKey() string GetHeartbeats() ([]heartbeat.PubKeyHeartbeat, error) TpsBenchmark() *statistics.TpsBenchmark StatusMetrics() external.StatusMetricsHandler @@ -50,30 +48,11 @@ type shardStatisticsResponse struct { // Routes defines node related routes func Routes(router *gin.RouterGroup) { - router.GET("/address", Address) router.GET("/heartbeatstatus", HeartbeatStatus) router.GET("/statistics", Statistics) router.GET("/status", StatusMetrics) } -// Address returns the information about the address passed as parameter -func Address(c *gin.Context) { - ef, ok := c.MustGet("elrondFacade").(FacadeHandler) - if !ok { - c.JSON(http.StatusInternalServerError, gin.H{"error": errors.ErrInvalidAppContext.Error()}) - return - } - - currentAddress := ef.GetCurrentPublicKey() - address, err := url.Parse(currentAddress) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": errors.ErrCouldNotParsePubKey.Error()}) - return - } - - c.JSON(http.StatusOK, gin.H{"address": address.String()}) -} - // HeartbeatStatus respond with the heartbeat status of the node func HeartbeatStatus(c *gin.Context) { ef, ok := c.MustGet("elrondFacade").(FacadeHandler) diff --git a/api/node/routes_test.go b/api/node/routes_test.go index 693deedac94..0e5c5d1f262 100644 --- a/api/node/routes_test.go +++ b/api/node/routes_test.go @@ -34,11 +34,6 @@ type StatusResponse struct { Running bool `json:"running"` } -type AddressResponse struct { - GeneralResponse - Address string `json:"address"` -} - type StatisticsResponse struct { GeneralResponse Statistics struct { @@ -69,66 +64,6 @@ func TestStartNode_FailsWithoutFacade(t *testing.T) { ws.ServeHTTP(resp, req) } -func TestAddress_FailsWithoutFacade(t *testing.T) { - t.Parallel() - ws := startNodeServer(nil) - defer func() { - r := recover() - assert.NotNil(t, r, "Not providing elrondFacade context should panic") - }() - req, _ := http.NewRequest("GET", "/node/address", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) -} - -func TestAddress_FailsWithWrongFacadeTypeConversion(t *testing.T) { - t.Parallel() - ws := startNodeServerWrongFacade() - req, _ := http.NewRequest("GET", "/node/address", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - addressRsp := AddressResponse{} - loadResponse(resp.Body, &addressRsp) - assert.Equal(t, resp.Code, http.StatusInternalServerError) - assert.Equal(t, 
addressRsp.Error, errors.ErrInvalidAppContext.Error()) -} - -func TestAddress_FailsWithInvalidUrlString(t *testing.T) { - facade := mock.Facade{} - facade.GetCurrentPublicKeyHandler = func() string { - // we return a malformed scheme so that url.Parse will error - return "cache_object:foo/bar" - } - ws := startNodeServer(&facade) - req, _ := http.NewRequest("GET", "/node/address", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - addressRsp := AddressResponse{} - loadResponse(resp.Body, &addressRsp) - assert.Equal(t, resp.Code, http.StatusInternalServerError) - assert.Equal(t, addressRsp.Error, errors.ErrCouldNotParsePubKey.Error()) -} - -func TestAddress_ReturnsSuccessfully(t *testing.T) { - facade := mock.Facade{} - address := "abcdefghijklmnopqrstuvwxyz" - facade.GetCurrentPublicKeyHandler = func() string { - // we return a malformed scheme so that url.Parse will error - return address - } - ws := startNodeServer(&facade) - req, _ := http.NewRequest("GET", "/node/address", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) - - addressRsp := AddressResponse{} - loadResponse(resp.Body, &addressRsp) - assert.Equal(t, resp.Code, http.StatusOK) - assert.Equal(t, addressRsp.Address, address) -} - //------- Heartbeatstatus func TestHeartbeatStatus_FailsWithoutFacade(t *testing.T) { diff --git a/api/transaction/routes.go b/api/transaction/routes.go index 127ec1f53f7..d025ae22ba2 100644 --- a/api/transaction/routes.go +++ b/api/transaction/routes.go @@ -13,8 +13,8 @@ import ( // TxService interface defines methods that can be used from `elrondFacade` context variable type TxService interface { - CreateTransaction(nonce uint64, value string, receiverHex string, senderHex string, gasPrice uint64, gasLimit uint64, data string, signatureHex string, challenge string) (*transaction.Transaction, error) - SendTransaction(nonce uint64, sender string, receiver string, value string, gasPrice uint64, gasLimit uint64, code string, signature []byte) (string, error) + CreateTransaction(nonce uint64, value string, receiverHex string, senderHex string, gasPrice uint64, gasLimit uint64, data []byte, signatureHex string) (*transaction.Transaction, error) + SendTransaction(nonce uint64, sender string, receiver string, value string, gasPrice uint64, gasLimit uint64, txData []byte, signature []byte) (string, error) SendBulkTransactions([]*transaction.Transaction) (uint64, error) GetTransaction(hash string) (*transaction.Transaction, error) IsInterfaceNil() bool @@ -40,12 +40,11 @@ type SendTxRequest struct { Sender string `form:"sender" json:"sender"` Receiver string `form:"receiver" json:"receiver"` Value string `form:"value" json:"value"` - Data string `form:"data" json:"data"` + Data []byte `form:"data" json:"data"` Nonce uint64 `form:"nonce" json:"nonce"` GasPrice uint64 `form:"gasPrice" json:"gasPrice"` GasLimit uint64 `form:"gasLimit" json:"gasLimit"` Signature string `form:"signature" json:"signature"` - Challenge string `form:"challenge" json:"challenge"` } //TxResponse represents the structure on which the response will be validated against @@ -88,7 +87,7 @@ func SendTransaction(c *gin.Context) { txHash, err := ef.SendTransaction(gtx.Nonce, gtx.Sender, gtx.Receiver, gtx.Value, gtx.GasPrice, gtx.GasLimit, gtx.Data, signature) if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("%s: %s", errors.ErrTxGenerationFailed.Error(), err.Error())}) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%s: %s", errors.ErrTxGenerationFailed.Error(), 
err.Error())}) return } @@ -121,7 +120,6 @@ func SendMultipleTransactions(c *gin.Context) { receivedTx.GasLimit, receivedTx.Data, receivedTx.Signature, - receivedTx.Challenge, ) if err != nil { continue @@ -175,7 +173,6 @@ func txResponseFromTransaction(tx *transaction.Transaction) TxResponse { response.Receiver = hex.EncodeToString(tx.RcvAddr) response.Data = tx.Data response.Signature = hex.EncodeToString(tx.Signature) - response.Challenge = string(tx.Challenge) response.Value = tx.Value.String() response.GasLimit = tx.GasLimit response.GasPrice = tx.GasPrice diff --git a/api/transaction/routes_test.go b/api/transaction/routes_test.go index 96a8caf370a..650d342636d 100644 --- a/api/transaction/routes_test.go +++ b/api/transaction/routes_test.go @@ -45,7 +45,7 @@ func TestGetTransaction_WithCorrectHashShouldReturnTransaction(t *testing.T) { sender := "sender" receiver := "receiver" value := big.NewInt(10) - data := "data" + data := []byte("data") hash := "hash" facade := mock.Facade{ GetTransactionHandler: func(hash string) (i *tr.Transaction, e error) { @@ -90,7 +90,7 @@ func TestGetTransaction_WithUnknownHashShouldReturnNil(t *testing.T) { return &tr.Transaction{ SndAddr: []byte(sender), RcvAddr: []byte(receiver), - Data: data, + Data: []byte(data), Value: value, }, nil }, @@ -209,7 +209,7 @@ func TestSendTransaction_ErrorWhenFacadeSendTransactionError(t *testing.T) { facade := mock.Facade{ SendTransactionHandler: func(nonce uint64, sender string, receiver string, value string, - gasPrice uint64, gasLimit uint64, code string, signature []byte) (string, error) { + gasPrice uint64, gasLimit uint64, data []byte, signature []byte) (string, error) { return "", errors.New(errorString) }, } @@ -231,7 +231,7 @@ func TestSendTransaction_ErrorWhenFacadeSendTransactionError(t *testing.T) { transactionResponse := TransactionResponse{} loadResponse(resp.Body, &transactionResponse) - assert.Equal(t, http.StatusInternalServerError, resp.Code) + assert.Equal(t, http.StatusBadRequest, resp.Code) assert.Contains(t, transactionResponse.Error, errorString) assert.Empty(t, transactionResponse.TxResp) } @@ -248,7 +248,7 @@ func TestSendTransaction_ReturnsSuccessfully(t *testing.T) { facade := mock.Facade{ SendTransactionHandler: func(nonce uint64, sender string, receiver string, value string, - gasPrice uint64, gasLimit uint64, code string, signature []byte) (string, error) { + gasPrice uint64, gasLimit uint64, data []byte, signature []byte) (string, error) { return txHash, nil }, } diff --git a/cmd/keygenerator/main.go b/cmd/keygenerator/main.go index 07665696ae5..b3acf2e2bea 100644 --- a/cmd/keygenerator/main.go +++ b/cmd/keygenerator/main.go @@ -72,7 +72,7 @@ func backupFileIfExists(filename string) { } } //if we reached here the file probably exists, make a timestamped backup - os.Rename(filename, filename+"."+fmt.Sprintf("%d", time.Now().Unix())) + _ = os.Rename(filename, filename+"."+fmt.Sprintf("%d", time.Now().Unix())) } @@ -101,7 +101,7 @@ func generateFiles(ctx *cli.Context) error { return err } - initialBalancesSkFile, err = os.OpenFile(initialBalancesSkFileName, os.O_CREATE|os.O_WRONLY, 0666) + initialBalancesSkFile, err = os.OpenFile(initialBalancesSkFileName, os.O_CREATE|os.O_WRONLY, core.FileModeUserReadWrite) if err != nil { return err } @@ -112,7 +112,7 @@ func generateFiles(ctx *cli.Context) error { return err } - initialNodesSkFile, err = os.OpenFile(initialNodesSkFileName, os.O_CREATE|os.O_WRONLY, 0666) + initialNodesSkFile, err = os.OpenFile(initialNodesSkFileName, 
os.O_CREATE|os.O_WRONLY, core.FileModeUserReadWrite) if err != nil { return err } diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index f54175c7ecf..a67e407f237 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -4,12 +4,25 @@ # value will be given as string. For example: "0", "1", "15", "metachain" DestinationShardAsObserver = "0" - # NetworkID will be used for network versions - NetworkID = "undefined" - # StatusPollingIntervalSec represents the no of seconds between multiple polling for the status for AppStatusHandler StatusPollingIntervalSec = 2 +[StoragePruning] + # If the Enabled flag is set to false, then the storers won't divide epochs into separate dbs + Enabled = false + + # If this flag is set to true, the node won't delete any database between epochs + FullArchive = false + + # NumEpochsToKeep - if the flag above is set to false, this will set the number of epochs to keep in the storage. + # Epochs older than (current epoch - NumEpochsToKeep) will be removed + NumEpochsToKeep = 3 + + # NumActivePersisters - this will set the number of persisters to keep active at a given moment. This applies to both + # full archive and regular nodes. For a node which is not a full archive, it has to be smaller than or equal + # to the NumEpochsToKeep value + NumActivePersisters = 2 + [Explorer] Enabled = false IndexerURL = "http://localhost:9200" @@ -25,6 +38,17 @@ MaxBatchSize = 1 MaxOpenFiles = 10 +[MiniBlockHeadersStorage] + [MiniBlockHeadersStorage.Cache] + Size = 300 + Type = "LRU" + [MiniBlockHeadersStorage.DB] + FilePath = "MiniBlockHeaders" + Type = "LvlDBSerial" + BatchDelaySeconds = 30 + MaxBatchSize = 1 + MaxOpenFiles = 10 + [PeerBlockBodyStorage] [PeerBlockBodyStorage.Cache] Size = 1000 @@ -162,23 +186,43 @@ Size = 75000 Type = "LRU" [AccountsTrieStorage.DB] - FilePath = "AccountsTrie" + FilePath = "AccountsTrie/MainDB" Type = "LvlDBSerial" BatchDelaySeconds = 5 MaxBatchSize = 45000 MaxOpenFiles = 10 +[EvictionWaitingList] + Size = 100 + [EvictionWaitingList.DB] + FilePath = "EvictionWaitingList" + Type = "LvlDBSerial" + BatchDelaySeconds = 5 + MaxBatchSize = 10000 + MaxOpenFiles = 10 + +[TrieSnapshotDB] + FilePath = "TrieSnapshot" + Type = "LvlDBSerial" + BatchDelaySeconds = 15 + MaxBatchSize = 40000 + MaxOpenFiles = 10 + [PeerAccountsTrieStorage] [PeerAccountsTrieStorage.Cache] Size = 75000 Type = "LRU" [PeerAccountsTrieStorage.DB] - FilePath = "PeerAccountsTrie" + FilePath = "PeerAccountsTrie/MainDB" Type = "LvlDBSerial" BatchDelaySeconds = 5 MaxBatchSize = 45000 MaxOpenFiles = 10 +[HeadersPoolConfig] + MaxHeadersPerShard = 1000 + NumElementsToRemoveOnEviction = 200 + [BadBlocksCache] Size = 1000 Type = "LRU" @@ -187,31 +231,19 @@ Size = 300 Type = "LRU" -[StateBlockBodyDataPool] - Size = 1000 - Type = "LRU" - [PeerBlockBodyDataPool] Size = 1000 Type = "LRU" -[BlockHeaderDataPool] - Size = 1000 - Type = "LRU" - -[BlockHeaderNoncesDataPool] - Size = 1000 - Type = "LRU" - -[MetaHeaderNoncesDataPool] - Size = 1000 - Type = "LRU" - [TxDataPool] Size = 75000 Type = "FIFOSharded" Shards = 16 +[TrieNodesDataPool] + Size = 50000 + Type = "LRU" + [UnsignedTransactionDataPool] Size = 75000 Type = "LRU" @@ -220,18 +252,6 @@ Size = 75000 Type = "LRU" -[ShardHeadersDataPool] - Size = 1000 - Type = "LRU" - -[MiniBlockHeaderHashesDataPool] - Size = 1000 - Type = "LRU" - -[MetaBlockBodyDataPool] - Size = 1000 - Type = "LRU" - [Logger] Path = "logs" StackTraceDepth = 2 @@ -250,8 +270,19 @@ [MultisigHasher] Type = "blake2b" +# Marshalizer configuration
+# Type identifies the marshalizer +# SizeCheckDelta the maximum allowed drift between the input data buffer and +# the reencoded version (in percent). +# 0 disables the feature. [Marshalizer] Type = "json" + SizeCheckDelta = 0 + +# TODO: change this config to real numbers before merging to development +[EpochStartConfig] + MinRoundsBetweenEpochs = 1000000 + RoundsPerEpoch = 1000000 # ResourceStats, if enabled, will output in a folder called "stats" # resource statistics. For example: number of active go routines, memory allocation, number of GC sweeps, etc. @@ -289,4 +320,7 @@ TimeoutMilliseconds = 100 Version = 0 # Setting 0 means 'use default value' - +[StateTrieConfig] + RoundsModulus = 100 + # TODO: change this to true when pruning bugs are fixed + PruningEnabled = false diff --git a/cmd/node/config/economics.toml b/cmd/node/config/economics.toml index 7eb607d37d6..9b0fc19796c 100644 --- a/cmd/node/config/economics.toml +++ b/cmd/node/config/economics.toml @@ -4,17 +4,27 @@ BurnAddress = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" [RewardsSettings] - RewardsValue = "1000" + RewardsValue = "20000000000000000000" #20ERD CommunityPercentage = 0.10 LeaderPercentage = 0.50 BurnPercentage = 0.40 - DenominationCoefficientForView = "0.0001" + DenominationCoefficientForView = "0.000000000000000001" #10^-18 [FeeSettings] MaxGasLimitPerBlock = "1500000000" - MinGasPrice = "0" + MinGasPrice = "100000000000000" #will yield min tx fee of 10ERD MinGasLimit = "100000" - + GasPerDataByte = "1500" + DataLimitForBaseCalc = "10000" [ValidatorSettings] - StakeValue = "500000000000000000000000" + StakeValue = "500000000000000000000000" #500000ERD UnBoundPeriod = "100000" + +[RatingSettings] + StartRating = 50 + MaxRating = 100 + MinRating = 1 + ProposerIncreaseRatingStep = 2 + ProposerDecreaseRatingStep = 4 + ValidatorIncreaseRatingStep = 1 + ValidatorDecreaseRatingStep = 2 diff --git a/cmd/node/config/server.toml b/cmd/node/config/server.toml index 0f3e35676b7..c459a5fd74f 100644 --- a/cmd/node/config/server.toml +++ b/cmd/node/config/server.toml @@ -1,7 +1,3 @@ [ElasticSearch] Username = "basic_auth_username" Password = "basic_auth_password" -[Prometheus] - PrometheusBaseURL = "" - JoinRoute = "/join" - StatusRoute = "/status" diff --git a/cmd/node/factory/interface.go b/cmd/node/factory/interface.go index f01b65319a5..46b6bba4d01 100644 --- a/cmd/node/factory/interface.go +++ b/cmd/node/factory/interface.go @@ -2,12 +2,21 @@ package factory import ( "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/epochStart/notifier" ) +// EpochStartNotifier defines which actions should be done for handling new epoch's events +type EpochStartNotifier interface { + RegisterHandler(handler notifier.SubscribeFunctionHandler) + UnregisterHandler(handler notifier.SubscribeFunctionHandler) + NotifyAll(hdr data.HeaderHandler) + IsInterfaceNil() bool +} + //HeaderSigVerifierHandler is the interface needed to check a header if is correct type HeaderSigVerifierHandler interface { VerifyRandSeed(header data.HeaderHandler) error VerifyRandSeedAndLeaderSignature(header data.HeaderHandler) error VerifySignature(header data.HeaderHandler) error IsInterfaceNil() bool -} +} \ No newline at end of file diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index c6f6db10681..5f3e62fcceb 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -6,20 +6,20 @@ import ( "crypto/rand" "encoding/hex" "errors" + "fmt" "io" "math/big" -
"path/filepath" "time" "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/consensus/round" "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/genesis" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/core/partitioning" "github.com/ElrondNetwork/elrond-go/core/serviceContainer" "github.com/ElrondNetwork/elrond-go/core/statistics/softwareVersion" - factorySoftawareVersion "github.com/ElrondNetwork/elrond-go/core/statistics/softwareVersion/factory" + factorySoftwareVersion "github.com/ElrondNetwork/elrond-go/core/statistics/softwareVersion/factory" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/crypto/signing" "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" @@ -34,15 +34,22 @@ import ( "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" factoryState "github.com/ElrondNetwork/elrond-go/data/state/factory" "github.com/ElrondNetwork/elrond-go/data/trie" + "github.com/ElrondNetwork/elrond-go/data/trie/factory" "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool/headersCache" "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" metafactoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/metachain" shardfactoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/shard" + "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/txpool" "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/epochStart/genesis" + metachainEpochStart "github.com/ElrondNetwork/elrond-go/epochStart/metachain" + "github.com/ElrondNetwork/elrond-go/epochStart/shardchain" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/hashing/blake2b" "github.com/ElrondNetwork/elrond-go/hashing/sha256" @@ -60,7 +67,6 @@ import ( "github.com/ElrondNetwork/elrond-go/process/block/preprocess" "github.com/ElrondNetwork/elrond-go/process/coordinator" "github.com/ElrondNetwork/elrond-go/process/economics" - "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/factory/metachain" "github.com/ElrondNetwork/elrond-go/process/factory/shard" "github.com/ElrondNetwork/elrond-go/process/headerCheck" @@ -70,10 +76,12 @@ import ( "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" processSync "github.com/ElrondNetwork/elrond-go/process/sync" + "github.com/ElrondNetwork/elrond-go/process/track" "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/statusHandler" "github.com/ElrondNetwork/elrond-go/storage" + storageFactory "github.com/ElrondNetwork/elrond-go/storage/factory" "github.com/ElrondNetwork/elrond-go/storage/memorydb" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/ElrondNetwork/elrond-go/storage/timecache" @@ -160,26 +168,30 @@ type Process struct { InterceptorsContainer process.InterceptorsContainer ResolversFinder 
dataRetriever.ResolversFinder Rounder consensus.Rounder + EpochStartTrigger epochStart.TriggerHandler ForkDetector process.ForkDetector BlockProcessor process.BlockProcessor BlackListHandler process.BlackListHandler BootStorer process.BootStorer HeaderSigVerifier HeaderSigVerifierHandler ValidatorsStatistics process.ValidatorStatisticsProcessor + BlockTracker process.BlockTracker } type coreComponentsFactoryArgs struct { - config *config.Config - uniqueID string - chainID []byte + config *config.Config + pathManager storage.PathManagerHandler + shardId string + chainID []byte } // NewCoreComponentsFactoryArgs initializes the arguments necessary for creating the core components -func NewCoreComponentsFactoryArgs(config *config.Config, uniqueID string, chainID []byte) *coreComponentsFactoryArgs { +func NewCoreComponentsFactoryArgs(config *config.Config, pathManager storage.PathManagerHandler, shardId string, chainID []byte) *coreComponentsFactoryArgs { return &coreComponentsFactoryArgs{ - config: config, - uniqueID: uniqueID, - chainID: chainID, + config: config, + pathManager: pathManager, + shardId: shardId, + chainID: chainID, } } @@ -195,9 +207,24 @@ func CoreComponentsFactory(args *coreComponentsFactoryArgs) (*Core, error) { return nil, errors.New("could not create marshalizer: " + err.Error()) } - merkleTrie, err := getTrie(args.config.AccountsTrieStorage, marshalizer, hasher, args.uniqueID) + trieFactoryArgs := factory.TrieFactoryArgs{ + Cfg: args.config.AccountsTrieStorage, + EvictionWaitingListCfg: args.config.EvictionWaitingList, + SnapshotDbCfg: args.config.TrieSnapshotDB, + Marshalizer: marshalizer, + Hasher: hasher, + PathManager: args.pathManager, + ShardId: args.shardId, + PruningEnabled: args.config.StateTrieConfig.PruningEnabled, + } + trieFactory, err := factory.NewTrieFactory(trieFactoryArgs) + if err != nil { + return nil, err + } + + merkleTrie, err := trieFactory.Create() if err != nil { - return nil, errors.New("error creating trie: " + err.Error()) + return nil, err } uint64ByteSliceConverter := uint64ByteSlice.NewBigEndianConverter() @@ -216,7 +243,7 @@ type stateComponentsFactoryArgs struct { genesisConfig *sharding.Genesis shardCoordinator sharding.Coordinator core *Core - uniqueID string + pathManager storage.PathManagerHandler } // NewStateComponentsFactoryArgs initializes the arguments necessary for creating the state components @@ -225,14 +252,14 @@ func NewStateComponentsFactoryArgs( genesisConfig *sharding.Genesis, shardCoordinator sharding.Coordinator, core *Core, - uniqueID string, + pathManager storage.PathManagerHandler, ) *stateComponentsFactoryArgs { return &stateComponentsFactoryArgs{ config: config, genesisConfig: genesisConfig, shardCoordinator: shardCoordinator, core: core, - uniqueID: uniqueID, + pathManager: pathManager, } } @@ -269,12 +296,29 @@ func StateComponentsFactory(args *stateComponentsFactoryArgs) (*State, error) { return nil, errors.New("initial balances could not be processed " + err.Error()) } - peerAccountsTrie, err := getTrie( - args.config.PeerAccountsTrieStorage, - args.core.Marshalizer, - args.core.Hasher, - args.uniqueID, - ) + var shardId string + if args.shardCoordinator.SelfId() > args.shardCoordinator.NumberOfShards() { + shardId = "metachain" + } else { + shardId = fmt.Sprintf("%d", args.shardCoordinator.SelfId()) + } + + peerAccountsTrieFactoryArguments := factory.TrieFactoryArgs{ + Cfg: args.config.PeerAccountsTrieStorage, + EvictionWaitingListCfg: args.config.EvictionWaitingList, + SnapshotDbCfg: 
args.config.TrieSnapshotDB, + Marshalizer: args.core.Marshalizer, + Hasher: args.core.Hasher, + PathManager: args.pathManager, + ShardId: shardId, + PruningEnabled: args.config.StateTrieConfig.PruningEnabled, + } + peerAccountsTrieFactory, err := factory.NewTrieFactory(peerAccountsTrieFactoryArguments) + if err != nil { + return nil, err + } + + peerAccountsTrie, err := peerAccountsTrieFactory.Create() if err != nil { return nil, err } @@ -299,10 +343,12 @@ func StateComponentsFactory(args *stateComponentsFactoryArgs) (*State, error) { } type dataComponentsFactoryArgs struct { - config *config.Config - shardCoordinator sharding.Coordinator - core *Core - uniqueID string + config *config.Config + shardCoordinator sharding.Coordinator + core *Core + pathManager storage.PathManagerHandler + epochStartNotifier EpochStartNotifier + currentEpoch uint32 } // NewDataComponentsFactoryArgs initializes the arguments necessary for creating the data components @@ -310,13 +356,17 @@ func NewDataComponentsFactoryArgs( config *config.Config, shardCoordinator sharding.Coordinator, core *Core, - uniqueID string, + pathManager storage.PathManagerHandler, + epochStartNotifier EpochStartNotifier, + currentEpoch uint32, ) *dataComponentsFactoryArgs { return &dataComponentsFactoryArgs{ - config: config, - shardCoordinator: shardCoordinator, - core: core, - uniqueID: uniqueID, + config: config, + shardCoordinator: shardCoordinator, + core: core, + pathManager: pathManager, + epochStartNotifier: epochStartNotifier, + currentEpoch: currentEpoch, } } @@ -329,19 +379,25 @@ func DataComponentsFactory(args *dataComponentsFactoryArgs) (*Data, error) { return nil, errors.New("could not create block chain: " + err.Error()) } - store, err := createDataStoreFromConfig(args.config, args.shardCoordinator, args.uniqueID) + store, err := createDataStoreFromConfig( + args.config, + args.shardCoordinator, + args.pathManager, + args.epochStartNotifier, + args.currentEpoch, + ) if err != nil { return nil, errors.New("could not create local data store: " + err.Error()) } if args.shardCoordinator.SelfId() < args.shardCoordinator.NumberOfShards() { - datapool, err = createShardDataPoolFromConfig(args.config, args.core.Uint64ByteSliceConverter) + datapool, err = createShardDataPoolFromConfig(args.config) if err != nil { return nil, errors.New("could not create shard data pools: " + err.Error()) } } if args.shardCoordinator.SelfId() == sharding.MetachainShardId { - metaDatapool, err = createMetaDataPoolFromConfig(args.config, args.core.Uint64ByteSliceConverter) + metaDatapool, err = createMetaDataPoolFromConfig(args.config) if err != nil { return nil, errors.New("could not create shard data pools: " + err.Error()) } @@ -466,21 +522,27 @@ func NetworkComponentsFactory(p2pConfig *config.P2PConfig, log logger.Logger, co } type processComponentsFactoryArgs struct { - coreComponents *coreComponentsFactoryArgs - genesisConfig *sharding.Genesis - economicsData *economics.EconomicsData - nodesConfig *sharding.NodesSetup - gasSchedule map[string]map[string]uint64 - syncer ntp.SyncTimer - shardCoordinator sharding.Coordinator - nodesCoordinator sharding.NodesCoordinator - data *Data - core *Core - crypto *Crypto - state *State - network *Network - coreServiceContainer serviceContainer.Core - requestedItemsHandler dataRetriever.RequestedItemsHandler + coreComponents *coreComponentsFactoryArgs + genesisConfig *sharding.Genesis + economicsData *economics.EconomicsData + nodesConfig *sharding.NodesSetup + gasSchedule map[string]map[string]uint64 + 
syncer ntp.SyncTimer + shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator + data *Data + core *Core + crypto *Crypto + state *State + network *Network + coreServiceContainer serviceContainer.Core + requestedItemsHandler dataRetriever.RequestedItemsHandler + epochStartNotifier EpochStartNotifier + epochStart *config.EpochStartConfig + startEpochNum uint32 + rater sharding.RaterHandler + sizeCheckDelta uint32 + stateCheckpointModulus uint } // NewProcessComponentsFactoryArgs initializes the arguments necessary for creating the process components @@ -500,23 +562,35 @@ func NewProcessComponentsFactoryArgs( network *Network, coreServiceContainer serviceContainer.Core, requestedItemsHandler dataRetriever.RequestedItemsHandler, + epochStartNotifier EpochStartNotifier, + epochStart *config.EpochStartConfig, + startEpochNum uint32, + rater sharding.RaterHandler, + sizeCheckDelta uint32, + stateCheckpointModulus uint, ) *processComponentsFactoryArgs { return &processComponentsFactoryArgs{ - coreComponents: coreComponents, - genesisConfig: genesisConfig, - economicsData: economicsData, - nodesConfig: nodesConfig, - gasSchedule: gasSchedule, - syncer: syncer, - shardCoordinator: shardCoordinator, - nodesCoordinator: nodesCoordinator, - data: data, - core: core, - crypto: crypto, - state: state, - network: network, - coreServiceContainer: coreServiceContainer, - requestedItemsHandler: requestedItemsHandler, + coreComponents: coreComponents, + genesisConfig: genesisConfig, + economicsData: economicsData, + nodesConfig: nodesConfig, + gasSchedule: gasSchedule, + syncer: syncer, + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + data: data, + core: core, + crypto: crypto, + state: state, + network: network, + coreServiceContainer: coreServiceContainer, + requestedItemsHandler: requestedItemsHandler, + epochStartNotifier: epochStartNotifier, + epochStart: epochStart, + startEpochNum: startEpochNum, + rater: rater, + sizeCheckDelta: sizeCheckDelta, + stateCheckpointModulus: stateCheckpointModulus, } } @@ -535,15 +609,26 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err return nil, err } + rounder, err := round.NewRound( + time.Unix(args.nodesConfig.StartTime, 0), + args.syncer.CurrentTime(), + time.Millisecond*time.Duration(args.nodesConfig.RoundDuration), + args.syncer) + if err != nil { + return nil, err + } + interceptorContainerFactory, resolversContainerFactory, blackListHandler, err := newInterceptorAndResolverContainerFactory( args.shardCoordinator, args.nodesCoordinator, - args.data, args.core, + args.data, + args.core, args.crypto, args.state, args.network, args.economicsData, headerSigVerifier, + args.sizeCheckDelta, ) if err != nil { return nil, err @@ -565,16 +650,17 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err return nil, err } - rounder, err := round.NewRound( - time.Unix(args.nodesConfig.StartTime, 0), - args.syncer.CurrentTime(), - time.Millisecond*time.Duration(args.nodesConfig.RoundDuration), - args.syncer) + requestHandler, err := newRequestHandler(resolversFinder, args.shardCoordinator, args.requestedItemsHandler) if err != nil { return nil, err } - forkDetector, err := newForkDetector(rounder, args.shardCoordinator, blackListHandler, args.nodesConfig.StartTime) + epochStartTrigger, err := newEpochStartTrigger(args, requestHandler) + if err != nil { + return nil, err + } + + err = dataRetriever.SetEpochHandlerToHdrResolver(resolversContainer, 
epochStartTrigger) if err != nil { return nil, err } @@ -615,14 +701,47 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err return nil, err } + argsHeaderValidator := block.ArgsHeaderValidator{ + Hasher: args.core.Hasher, + Marshalizer: args.core.Marshalizer, + } + headerValidator, err := block.NewHeaderValidator(argsHeaderValidator) + if err != nil { + return nil, err + } + + blockTracker, err := newBlockTracker( + args, + headerValidator, + requestHandler, + rounder, + genesisBlocks, + ) + if err != nil { + return nil, err + } + + forkDetector, err := newForkDetector( + rounder, + args.shardCoordinator, + blackListHandler, + blockTracker, + args.nodesConfig.StartTime, + ) + if err != nil { + return nil, err + } + blockProcessor, err := newBlockProcessor( args, - resolversFinder, + requestHandler, forkDetector, - genesisBlocks, rounder, + epochStartTrigger, bootStorer, validatorStatisticsProcessor, + headerValidator, + blockTracker, ) if err != nil { return nil, err @@ -634,10 +753,12 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err Rounder: rounder, ForkDetector: forkDetector, BlockProcessor: blockProcessor, + EpochStartTrigger: epochStartTrigger, BlackListHandler: blackListHandler, BootStorer: bootStorer, HeaderSigVerifier: headerSigVerifier, ValidatorsStatistics: validatorStatisticsProcessor, + BlockTracker: blockTracker, }, nil } @@ -679,6 +800,96 @@ func prepareGenesisBlock(args *processComponentsFactoryArgs, genesisBlocks map[u return nil } +func newRequestHandler( + resolversFinder dataRetriever.ResolversFinder, + shardCoordinator sharding.Coordinator, + requestedItemsHandler dataRetriever.RequestedItemsHandler, +) (process.RequestHandler, error) { + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { + requestHandler, err := requestHandlers.NewShardResolverRequestHandler( + resolversFinder, + requestedItemsHandler, + MaxTxsToRequest, + shardCoordinator.SelfId(), + ) + if err != nil { + return nil, err + } + + return requestHandler, nil + } + + if shardCoordinator.SelfId() == sharding.MetachainShardId { + requestHandler, err := requestHandlers.NewMetaResolverRequestHandler( + resolversFinder, + requestedItemsHandler, + MaxTxsToRequest, + ) + if err != nil { + return nil, err + } + + return requestHandler, nil + } + + return nil, errors.New("could not create new request handler because of wrong shard id") +} + +func newEpochStartTrigger( + args *processComponentsFactoryArgs, + requestHandler epochStart.RequestHandler, +) (epochStart.TriggerHandler, error) { + if args.shardCoordinator.SelfId() < args.shardCoordinator.NumberOfShards() { + argsHeaderValidator := block.ArgsHeaderValidator{ + Hasher: args.core.Hasher, + Marshalizer: args.core.Marshalizer, + } + headerValidator, err := block.NewHeaderValidator(argsHeaderValidator) + if err != nil { + return nil, err + } + + argEpochStart := &shardchain.ArgsShardEpochStartTrigger{ + Marshalizer: args.core.Marshalizer, + Hasher: args.core.Hasher, + HeaderValidator: headerValidator, + Uint64Converter: args.core.Uint64ByteSliceConverter, + DataPool: args.data.Datapool, + Storage: args.data.Store, + RequestHandler: requestHandler, + Epoch: args.startEpochNum, + EpochStartNotifier: args.epochStartNotifier, + Validity: process.MetaBlockValidity, + Finality: process.BlockFinality, + } + epochStartTrigger, err := shardchain.NewEpochStartTrigger(argEpochStart) + if err != nil { + return nil, errors.New("error creating new start of epoch trigger" + err.Error()) + } + + 
return epochStartTrigger, nil + } + + if args.shardCoordinator.SelfId() == sharding.MetachainShardId { + argEpochStart := &metachainEpochStart.ArgsNewMetaEpochStartTrigger{ + GenesisTime: time.Unix(args.nodesConfig.StartTime, 0), + Settings: args.epochStart, + Epoch: args.startEpochNum, + EpochStartNotifier: args.epochStartNotifier, + Storage: args.data.Store, + Marshalizer: args.core.Marshalizer, + } + epochStartTrigger, err := metachainEpochStart.NewEpochStartTrigger(argEpochStart) + if err != nil { + return nil, errors.New("error creating new start of epoch trigger" + err.Error()) + } + + return epochStartTrigger, nil + } + + return nil, errors.New("error creating new start of epoch trigger because of invalid shard id") +} + type seedRandReader struct { index int seed []byte @@ -716,7 +927,7 @@ func (srr *seedRandReader) Read(p []byte) (n int, err error) { // CreateSoftwareVersionChecker will create a new software version checker and will start check if a new software version // is available func CreateSoftwareVersionChecker(statusHandler core.AppStatusHandler) (*softwareVersion.SoftwareVersionChecker, error) { - softwareVersionCheckerFactory, err := factorySoftawareVersion.NewSoftwareVersionFactory(statusHandler) + softwareVersionCheckerFactory, err := factorySoftwareVersion.NewSoftwareVersionFactory(statusHandler) if err != nil { return nil, err } @@ -749,25 +960,6 @@ func getMarshalizerFromConfig(cfg *config.Config) (marshal.Marshalizer, error) { return nil, errors.New("no marshalizer provided in config file") } -func getTrie( - cfg config.StorageConfig, - marshalizer marshal.Marshalizer, - hasher hashing.Hasher, - uniqueID string, -) (data.Trie, error) { - - accountsTrieStorage, err := storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(cfg.Cache), - getDBFromConfig(cfg.DB, uniqueID), - getBloomFromConfig(cfg.Bloom), - ) - if err != nil { - return nil, errors.New("error creating accountsTrieStorage: " + err.Error()) - } - - return trie.NewTrie(accountsTrieStorage, marshalizer, hasher) -} - func createBlockChainFromConfig(config *config.Config, coordinator sharding.Coordinator, ash core.AppStatusHandler) (data.ChainHandler, error) { badBlockCache, err := storageUnit.NewCache( storageUnit.CacheType(config.BadBlocksCache.Type), @@ -813,441 +1005,80 @@ func createBlockChainFromConfig(config *config.Config, coordinator sharding.Coor func createDataStoreFromConfig( config *config.Config, shardCoordinator sharding.Coordinator, - uniqueID string, -) (dataRetriever.StorageService, error) { - if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { - return createShardDataStoreFromConfig(config, shardCoordinator, uniqueID) - } - if shardCoordinator.SelfId() == sharding.MetachainShardId { - return createMetaChainDataStoreFromConfig(config, shardCoordinator, uniqueID) - } - return nil, errors.New("can not create data store") -} - -func createShardDataStoreFromConfig( - config *config.Config, - shardCoordinator sharding.Coordinator, - uniqueID string, -) (dataRetriever.StorageService, error) { - - var headerUnit *storageUnit.Unit - var peerBlockUnit *storageUnit.Unit - var miniBlockUnit *storageUnit.Unit - var txUnit *storageUnit.Unit - var metachainHeaderUnit *storageUnit.Unit - var unsignedTxUnit *storageUnit.Unit - var rewardTxUnit *storageUnit.Unit - var metaHdrHashNonceUnit *storageUnit.Unit - var shardHdrHashNonceUnit *storageUnit.Unit - var bootstrapUnit *storageUnit.Unit - var heartbeatStorageUnit *storageUnit.Unit - var statusMetricsStorageUnit *storageUnit.Unit - var 
err error - - defer func() { - // cleanup - if err != nil { - if headerUnit != nil { - _ = headerUnit.DestroyUnit() - } - if peerBlockUnit != nil { - _ = peerBlockUnit.DestroyUnit() - } - if miniBlockUnit != nil { - _ = miniBlockUnit.DestroyUnit() - } - if txUnit != nil { - _ = txUnit.DestroyUnit() - } - if unsignedTxUnit != nil { - _ = unsignedTxUnit.DestroyUnit() - } - if rewardTxUnit != nil { - _ = rewardTxUnit.DestroyUnit() - } - if metachainHeaderUnit != nil { - _ = metachainHeaderUnit.DestroyUnit() - } - if metaHdrHashNonceUnit != nil { - _ = metaHdrHashNonceUnit.DestroyUnit() - } - if shardHdrHashNonceUnit != nil { - _ = shardHdrHashNonceUnit.DestroyUnit() - } - if bootstrapUnit != nil { - _ = bootstrapUnit.DestroyUnit() - } - if heartbeatStorageUnit != nil { - _ = heartbeatStorageUnit.DestroyUnit() - } - if statusMetricsStorageUnit != nil { - _ = statusMetricsStorageUnit.DestroyUnit() - } - } - }() - - txUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.TxStorage.Cache), - getDBFromConfig(config.TxStorage.DB, uniqueID), - getBloomFromConfig(config.TxStorage.Bloom)) - if err != nil { - return nil, err - } - - unsignedTxUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.UnsignedTransactionStorage.Cache), - getDBFromConfig(config.UnsignedTransactionStorage.DB, uniqueID), - getBloomFromConfig(config.UnsignedTransactionStorage.Bloom)) - if err != nil { - return nil, err - } - - rewardTxUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.RewardTxStorage.Cache), - getDBFromConfig(config.RewardTxStorage.DB, uniqueID), - getBloomFromConfig(config.RewardTxStorage.Bloom)) - if err != nil { - return nil, err - } - - miniBlockUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.MiniBlocksStorage.Cache), - getDBFromConfig(config.MiniBlocksStorage.DB, uniqueID), - getBloomFromConfig(config.MiniBlocksStorage.Bloom)) - if err != nil { - return nil, err - } - - peerBlockUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.PeerBlockBodyStorage.Cache), - getDBFromConfig(config.PeerBlockBodyStorage.DB, uniqueID), - getBloomFromConfig(config.PeerBlockBodyStorage.Bloom)) - if err != nil { - return nil, err - } - - headerUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.BlockHeaderStorage.Cache), - getDBFromConfig(config.BlockHeaderStorage.DB, uniqueID), - getBloomFromConfig(config.BlockHeaderStorage.Bloom)) - if err != nil { - return nil, err - } - - metachainHeaderUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.MetaBlockStorage.Cache), - getDBFromConfig(config.MetaBlockStorage.DB, uniqueID), - getBloomFromConfig(config.MetaBlockStorage.Bloom)) - if err != nil { - return nil, err - } - - metaHdrHashNonceUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.MetaHdrNonceHashStorage.Cache), - getDBFromConfig(config.MetaHdrNonceHashStorage.DB, uniqueID), - getBloomFromConfig(config.MetaHdrNonceHashStorage.Bloom), - ) - if err != nil { - return nil, err - } - - shardHdrHashNonceUnit, err = storageUnit.NewShardedStorageUnitFromConf( - getCacherFromConfig(config.ShardHdrNonceHashStorage.Cache), - getDBFromConfig(config.ShardHdrNonceHashStorage.DB, uniqueID), - getBloomFromConfig(config.ShardHdrNonceHashStorage.Bloom), - shardCoordinator.SelfId(), - ) - if err != nil { - return nil, err - } - - heartbeatStorageUnit, err = storageUnit.NewStorageUnitFromConf( - 
getCacherFromConfig(config.Heartbeat.HeartbeatStorage.Cache), - getDBFromConfig(config.Heartbeat.HeartbeatStorage.DB, uniqueID), - getBloomFromConfig(config.Heartbeat.HeartbeatStorage.Bloom)) - if err != nil { - return nil, err - } - - bootstrapUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.BootstrapStorage.Cache), - getDBFromConfig(config.BootstrapStorage.DB, uniqueID), - getBloomFromConfig(config.BootstrapStorage.Bloom)) - if err != nil { - return nil, err - } - - statusMetricsStorageUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.StatusMetricsStorage.Cache), - getDBFromConfig(config.StatusMetricsStorage.DB, uniqueID), - getBloomFromConfig(config.StatusMetricsStorage.Bloom)) - if err != nil { - return nil, err - } - - store := dataRetriever.NewChainStorer() - store.AddStorer(dataRetriever.TransactionUnit, txUnit) - store.AddStorer(dataRetriever.MiniBlockUnit, miniBlockUnit) - store.AddStorer(dataRetriever.PeerChangesUnit, peerBlockUnit) - store.AddStorer(dataRetriever.BlockHeaderUnit, headerUnit) - store.AddStorer(dataRetriever.MetaBlockUnit, metachainHeaderUnit) - store.AddStorer(dataRetriever.UnsignedTransactionUnit, unsignedTxUnit) - store.AddStorer(dataRetriever.RewardTransactionUnit, rewardTxUnit) - store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, metaHdrHashNonceUnit) - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardCoordinator.SelfId()) - store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnit) - store.AddStorer(dataRetriever.HeartbeatUnit, heartbeatStorageUnit) - store.AddStorer(dataRetriever.BootstrapUnit, bootstrapUnit) - store.AddStorer(dataRetriever.StatusMetricsUnit, statusMetricsStorageUnit) - - return store, err -} - -func createMetaChainDataStoreFromConfig( - config *config.Config, - shardCoordinator sharding.Coordinator, - uniqueID string, + pathManager storage.PathManagerHandler, + epochStartNotifier EpochStartNotifier, + currentEpoch uint32, ) (dataRetriever.StorageService, error) { - var peerDataUnit, shardDataUnit, metaBlockUnit, headerUnit, metaHdrHashNonceUnit *storageUnit.Unit - var txUnit, miniBlockUnit, unsignedTxUnit *storageUnit.Unit - var shardHdrHashNonceUnits []*storageUnit.Unit - var bootstrapUnit *storageUnit.Unit - var heartbeatStorageUnit *storageUnit.Unit - var statusMetricsStorageUnit *storageUnit.Unit - - var err error - - defer func() { - // cleanup - if err != nil { - if peerDataUnit != nil { - _ = peerDataUnit.DestroyUnit() - } - if shardDataUnit != nil { - _ = shardDataUnit.DestroyUnit() - } - if metaBlockUnit != nil { - _ = metaBlockUnit.DestroyUnit() - } - if headerUnit != nil { - _ = headerUnit.DestroyUnit() - } - if metaHdrHashNonceUnit != nil { - _ = metaHdrHashNonceUnit.DestroyUnit() - } - if shardHdrHashNonceUnits != nil { - for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { - _ = shardHdrHashNonceUnits[i].DestroyUnit() - } - } - if txUnit != nil { - _ = txUnit.DestroyUnit() - } - if unsignedTxUnit != nil { - _ = unsignedTxUnit.DestroyUnit() - } - if miniBlockUnit != nil { - _ = miniBlockUnit.DestroyUnit() - } - if bootstrapUnit != nil { - _ = bootstrapUnit.DestroyUnit() - } - if heartbeatStorageUnit != nil { - _ = heartbeatStorageUnit.DestroyUnit() - } - if statusMetricsStorageUnit != nil { - _ = statusMetricsStorageUnit.DestroyUnit() - } - } - }() - - metaBlockUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.MetaBlockStorage.Cache), - getDBFromConfig(config.MetaBlockStorage.DB, 
uniqueID), - getBloomFromConfig(config.MetaBlockStorage.Bloom)) - if err != nil { - return nil, err - } - - shardDataUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.ShardDataStorage.Cache), - getDBFromConfig(config.ShardDataStorage.DB, uniqueID), - getBloomFromConfig(config.ShardDataStorage.Bloom)) - if err != nil { - return nil, err - } - - peerDataUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.PeerDataStorage.Cache), - getDBFromConfig(config.PeerDataStorage.DB, uniqueID), - getBloomFromConfig(config.PeerDataStorage.Bloom)) - if err != nil { - return nil, err - } - - headerUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.BlockHeaderStorage.Cache), - getDBFromConfig(config.BlockHeaderStorage.DB, uniqueID), - getBloomFromConfig(config.BlockHeaderStorage.Bloom)) - if err != nil { - return nil, err - } - - metaHdrHashNonceUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.MetaHdrNonceHashStorage.Cache), - getDBFromConfig(config.MetaHdrNonceHashStorage.DB, uniqueID), - getBloomFromConfig(config.MetaHdrNonceHashStorage.Bloom), + storageServiceFactory, err := storageFactory.NewStorageServiceFactory( + config, + shardCoordinator, + pathManager, + epochStartNotifier, + currentEpoch, ) if err != nil { return nil, err } - - shardHdrHashNonceUnits = make([]*storageUnit.Unit, shardCoordinator.NumberOfShards()) - for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { - shardHdrHashNonceUnits[i], err = storageUnit.NewShardedStorageUnitFromConf( - getCacherFromConfig(config.ShardHdrNonceHashStorage.Cache), - getDBFromConfig(config.ShardHdrNonceHashStorage.DB, uniqueID), - getBloomFromConfig(config.ShardHdrNonceHashStorage.Bloom), - i, - ) - if err != nil { - return nil, err - } - } - - heartbeatStorageUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.Heartbeat.HeartbeatStorage.Cache), - getDBFromConfig(config.Heartbeat.HeartbeatStorage.DB, uniqueID), - getBloomFromConfig(config.Heartbeat.HeartbeatStorage.Bloom)) - if err != nil { - return nil, err - } - - txUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.TxStorage.Cache), - getDBFromConfig(config.TxStorage.DB, uniqueID), - getBloomFromConfig(config.TxStorage.Bloom)) - if err != nil { - return nil, err - } - - unsignedTxUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.UnsignedTransactionStorage.Cache), - getDBFromConfig(config.UnsignedTransactionStorage.DB, uniqueID), - getBloomFromConfig(config.UnsignedTransactionStorage.Bloom)) - if err != nil { - return nil, err - } - - miniBlockUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.MiniBlocksStorage.Cache), - getDBFromConfig(config.MiniBlocksStorage.DB, uniqueID), - getBloomFromConfig(config.MiniBlocksStorage.Bloom)) - if err != nil { - return nil, err - } - - bootstrapUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.BootstrapStorage.Cache), - getDBFromConfig(config.BootstrapStorage.DB, uniqueID), - getBloomFromConfig(config.BootstrapStorage.Bloom)) - if err != nil { - return nil, err - } - - statusMetricsStorageUnit, err = storageUnit.NewStorageUnitFromConf( - getCacherFromConfig(config.StatusMetricsStorage.Cache), - getDBFromConfig(config.StatusMetricsStorage.DB, uniqueID), - getBloomFromConfig(config.StatusMetricsStorage.Bloom)) - if err != nil { - return nil, err + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { + return 
storageServiceFactory.CreateForShard() } - - store := dataRetriever.NewChainStorer() - store.AddStorer(dataRetriever.MetaBlockUnit, metaBlockUnit) - store.AddStorer(dataRetriever.MetaShardDataUnit, shardDataUnit) - store.AddStorer(dataRetriever.MetaPeerDataUnit, peerDataUnit) - store.AddStorer(dataRetriever.BlockHeaderUnit, headerUnit) - store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, metaHdrHashNonceUnit) - store.AddStorer(dataRetriever.TransactionUnit, txUnit) - store.AddStorer(dataRetriever.UnsignedTransactionUnit, unsignedTxUnit) - store.AddStorer(dataRetriever.MiniBlockUnit, unsignedTxUnit) - for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) - store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnits[i]) + if shardCoordinator.SelfId() == sharding.MetachainShardId { + return storageServiceFactory.CreateForMeta() } - store.AddStorer(dataRetriever.HeartbeatUnit, heartbeatStorageUnit) - store.AddStorer(dataRetriever.BootstrapUnit, bootstrapUnit) - store.AddStorer(dataRetriever.StatusMetricsUnit, statusMetricsStorageUnit) - - return store, err + return nil, errors.New("can not create data store") } func createShardDataPoolFromConfig( config *config.Config, - uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter, ) (dataRetriever.PoolsHolder, error) { log.Debug("creatingShardDataPool from config") - txPool, err := shardedData.NewShardedData(getCacherFromConfig(config.TxDataPool)) + txPool, err := txpool.CreateTxPool(storageFactory.GetCacherFromConfig(config.TxDataPool)) if err != nil { log.Error("error creating txpool") return nil, err } - uTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.UnsignedTransactionDataPool)) + uTxPool, err := shardedData.NewShardedData(storageFactory.GetCacherFromConfig(config.UnsignedTransactionDataPool)) if err != nil { log.Error("error creating smart contract result pool") return nil, err } - rewardTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.RewardTransactionDataPool)) + rewardTxPool, err := shardedData.NewShardedData(storageFactory.GetCacherFromConfig(config.RewardTransactionDataPool)) if err != nil { log.Error("error creating reward transaction pool") return nil, err } - cacherCfg := getCacherFromConfig(config.BlockHeaderDataPool) - hdrPool, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Error("error creating hdrpool") - return nil, err - } - - cacherCfg = getCacherFromConfig(config.MetaBlockBodyDataPool) - metaBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Error("error creating metaBlockBody") - return nil, err - } - - cacherCfg = getCacherFromConfig(config.BlockHeaderNoncesDataPool) - hdrNoncesCacher, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Error("error creating hdrNoncesCacher") - return nil, err - } - hdrNonces, err := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSliceConverter) + hdrPool, err := headersCache.NewHeadersPool(config.HeadersPoolConfig) if err != nil { - log.Error("error creating hdrNonces") + log.Error("error creating headers pool") return nil, err } - cacherCfg = getCacherFromConfig(config.TxBlockBodyDataPool) + cacherCfg := storageFactory.GetCacherFromConfig(config.TxBlockBodyDataPool) txBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) 
if err != nil { log.Error("error creating txBlockBody") return nil, err } - cacherCfg = getCacherFromConfig(config.PeerBlockBodyDataPool) + cacherCfg = storageFactory.GetCacherFromConfig(config.PeerBlockBodyDataPool) peerChangeBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) if err != nil { log.Error("error creating peerChangeBlockBody") return nil, err } + cacherCfg = storageFactory.GetCacherFromConfig(config.TrieNodesDataPool) + trieNodes, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + if err != nil { + log.Info("error creating trieNodes") + return nil, err + } + currBlockTxs, err := dataPool.NewCurrentBlockPool() if err != nil { return nil, err @@ -1258,57 +1089,43 @@ func createShardDataPoolFromConfig( uTxPool, rewardTxPool, hdrPool, - hdrNonces, txBlockBody, peerChangeBlockBody, - metaBlockBody, + trieNodes, currBlockTxs, ) } func createMetaDataPoolFromConfig( config *config.Config, - uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter, ) (dataRetriever.MetaPoolsHolder, error) { - cacherCfg := getCacherFromConfig(config.MetaBlockBodyDataPool) - metaBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Error("error creating metaBlockBody") - return nil, err - } - - cacherCfg = getCacherFromConfig(config.TxBlockBodyDataPool) + cacherCfg := storageFactory.GetCacherFromConfig(config.TxBlockBodyDataPool) txBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) if err != nil { log.Error("error creating txBlockBody") return nil, err } - cacherCfg = getCacherFromConfig(config.ShardHeadersDataPool) - shardHeaders, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + shardHeaders, err := headersCache.NewHeadersPool(config.HeadersPoolConfig) if err != nil { log.Error("error creating shardHeaders") return nil, err } - headersNoncesCacher, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - log.Error("error creating shard headers nonces pool") - return nil, err - } - headersNonces, err := dataPool.NewNonceSyncMapCacher(headersNoncesCacher, uint64ByteSliceConverter) + cacherCfg = storageFactory.GetCacherFromConfig(config.TrieNodesDataPool) + trieNodes, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) if err != nil { - log.Error("error creating shard headers nonces pool") + log.Info("error creating trieNodes") return nil, err } - txPool, err := shardedData.NewShardedData(getCacherFromConfig(config.TxDataPool)) + txPool, err := txpool.CreateTxPool(storageFactory.GetCacherFromConfig(config.TxDataPool)) if err != nil { log.Error("error creating txpool") return nil, err } - uTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.UnsignedTransactionDataPool)) + uTxPool, err := shardedData.NewShardedData(storageFactory.GetCacherFromConfig(config.UnsignedTransactionDataPool)) if err != nil { log.Error("error creating smart contract result pool") return nil, err @@ -1320,10 +1137,9 @@ func createMetaDataPoolFromConfig( } return dataPool.NewMetaDataPool( - metaBlockBody, txBlockBody, + trieNodes, shardHeaders, - headersNonces, txPool, uTxPool, currBlockTxs, @@ -1427,6 +1243,7 @@ func newInterceptorAndResolverContainerFactory( network *Network, economics *economics.EconomicsData, headerSigVerifier HeaderSigVerifierHandler, + sizeCheckDelta uint32, ) (process.InterceptorsContainerFactory, 
dataRetriever.ResolversContainerFactory, process.BlackListHandler, error) { if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { @@ -1440,6 +1257,7 @@ func newInterceptorAndResolverContainerFactory( network, economics, headerSigVerifier, + sizeCheckDelta, ) } if shardCoordinator.SelfId() == sharding.MetachainShardId { @@ -1453,6 +1271,7 @@ func newInterceptorAndResolverContainerFactory( state, economics, headerSigVerifier, + sizeCheckDelta, ) } @@ -1469,6 +1288,7 @@ func newShardInterceptorAndResolverContainerFactory( network *Network, economics *economics.EconomicsData, headerSigVerifier HeaderSigVerifierHandler, + sizeCheckDelta uint32, ) (process.InterceptorsContainerFactory, dataRetriever.ResolversContainerFactory, process.BlackListHandler, error) { headerBlackList := timecache.NewTimeCache(timeSpanForBadHeaders) interceptorContainerFactory, err := shard.NewInterceptorsContainerFactory( @@ -1491,6 +1311,7 @@ func newShardInterceptorAndResolverContainerFactory( headerBlackList, headerSigVerifier, core.ChainID, + sizeCheckDelta, ) if err != nil { return nil, nil, nil, err @@ -1509,6 +1330,8 @@ func newShardInterceptorAndResolverContainerFactory( data.Datapool, core.Uint64ByteSliceConverter, dataPacker, + core.Trie, + sizeCheckDelta, ) if err != nil { return nil, nil, nil, err @@ -1527,6 +1350,7 @@ func newMetaInterceptorAndResolverContainerFactory( state *State, economics *economics.EconomicsData, headerSigVerifier HeaderSigVerifierHandler, + sizeCheckDelta uint32, ) (process.InterceptorsContainerFactory, dataRetriever.ResolversContainerFactory, process.BlackListHandler, error) { headerBlackList := timecache.NewTimeCache(timeSpanForBadHeaders) interceptorContainerFactory, err := metachain.NewInterceptorsContainerFactory( @@ -1549,6 +1373,7 @@ func newMetaInterceptorAndResolverContainerFactory( headerBlackList, headerSigVerifier, core.ChainID, + sizeCheckDelta, ) if err != nil { return nil, nil, nil, err @@ -1567,6 +1392,8 @@ func newMetaInterceptorAndResolverContainerFactory( data.MetaDatapool, core.Uint64ByteSliceConverter, dataPacker, + core.Trie, + sizeCheckDelta, ) if err != nil { return nil, nil, nil, err @@ -1786,17 +1613,58 @@ func createInMemoryShardCoordinatorAndAccount( return newShardCoordinator, accounts, nil } +func newBlockTracker( + processArgs *processComponentsFactoryArgs, + headerValidator process.HeaderConstructionValidator, + requestHandler process.RequestHandler, + rounder consensus.Rounder, + genesisBlocks map[uint32]data.HeaderHandler, +) (process.BlockTracker, error) { + + argBaseTracker := track.ArgBaseTracker{ + Hasher: processArgs.core.Hasher, + HeaderValidator: headerValidator, + Marshalizer: processArgs.core.Marshalizer, + RequestHandler: requestHandler, + Rounder: rounder, + ShardCoordinator: processArgs.shardCoordinator, + Store: processArgs.data.Store, + StartHeaders: genesisBlocks, + } + + if processArgs.shardCoordinator.SelfId() < processArgs.shardCoordinator.NumberOfShards() { + arguments := track.ArgShardTracker{ + ArgBaseTracker: argBaseTracker, + PoolsHolder: processArgs.data.Datapool, + } + + return track.NewShardBlockTrack(arguments) + } + + if processArgs.shardCoordinator.SelfId() == sharding.MetachainShardId { + arguments := track.ArgMetaTracker{ + ArgBaseTracker: argBaseTracker, + PoolsHolder: processArgs.data.MetaDatapool, + } + + return track.NewMetaBlockTrack(arguments) + } + + return nil, errors.New("could not create block tracker") +} + func newForkDetector( rounder consensus.Rounder, shardCoordinator sharding.Coordinator, 
headerBlackList process.BlackListHandler, + blockTracker process.BlockTracker, genesisTime int64, ) (process.ForkDetector, error) { if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { - return processSync.NewShardForkDetector(rounder, headerBlackList, genesisTime) + return processSync.NewShardForkDetector(rounder, headerBlackList, blockTracker, genesisTime) } if shardCoordinator.SelfId() == sharding.MetachainShardId { - return processSync.NewMetaForkDetector(rounder, headerBlackList, genesisTime) + return processSync.NewMetaForkDetector(rounder, headerBlackList, blockTracker, genesisTime) } return nil, ErrCreateForkDetector @@ -1804,12 +1672,14 @@ func newForkDetector( func newBlockProcessor( processArgs *processComponentsFactoryArgs, - resolversFinder dataRetriever.ResolversFinder, + requestHandler process.RequestHandler, forkDetector process.ForkDetector, - genesisBlocks map[uint32]data.HeaderHandler, rounder consensus.Rounder, + epochStartTrigger epochStart.TriggerHandler, bootStorer process.BootStorer, validatorStatisticsProcessor process.ValidatorStatisticsProcessor, + headerValidator process.HeaderConstructionValidator, + blockTracker process.BlockTracker, ) (process.BlockProcessor, error) { shardCoordinator := processArgs.shardCoordinator @@ -1844,7 +1714,7 @@ func newBlockProcessor( if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return newShardBlockProcessor( - resolversFinder, + requestHandler, processArgs.shardCoordinator, processArgs.nodesCoordinator, specialAddressHolder, @@ -1852,20 +1722,21 @@ func newBlockProcessor( processArgs.core, processArgs.state, forkDetector, - genesisBlocks, processArgs.coreServiceContainer, processArgs.economicsData, rounder, + epochStartTrigger, validatorStatisticsProcessor, bootStorer, processArgs.gasSchedule, - processArgs.requestedItemsHandler, + processArgs.stateCheckpointModulus, + headerValidator, + blockTracker, ) } if shardCoordinator.SelfId() == sharding.MetachainShardId { - return newMetaBlockProcessor( - resolversFinder, + requestHandler, processArgs.shardCoordinator, processArgs.nodesCoordinator, specialAddressHolder, @@ -1873,21 +1744,22 @@ func newBlockProcessor( processArgs.core, processArgs.state, forkDetector, - genesisBlocks, processArgs.coreServiceContainer, processArgs.economicsData, validatorStatisticsProcessor, rounder, + epochStartTrigger, bootStorer, - processArgs.requestedItemsHandler, + headerValidator, + blockTracker, ) } - return nil, errors.New("could not create block processor and tracker") + return nil, errors.New("could not create block processor") } func newShardBlockProcessor( - resolversFinder dataRetriever.ResolversFinder, + requestHandler process.RequestHandler, shardCoordinator sharding.Coordinator, nodesCoordinator sharding.NodesCoordinator, specialAddressHandler process.SpecialAddressHandler, @@ -1895,14 +1767,16 @@ func newShardBlockProcessor( core *Core, state *State, forkDetector process.ForkDetector, - genesisBlocks map[uint32]data.HeaderHandler, coreServiceContainer serviceContainer.Core, economics *economics.EconomicsData, rounder consensus.Rounder, + epochStartTrigger epochStart.TriggerHandler, statisticsProcessor process.ValidatorStatisticsProcessor, bootStorer process.BootStorer, gasSchedule map[string]map[string]uint64, - requestedItemsHandler dataRetriever.RequestedItemsHandler, + stateCheckpointModulus uint, + headerValidator process.HeaderConstructionValidator, + blockTracker process.BlockTracker, ) (process.BlockProcessor, error) { argsParser, err := 
vmcommon.NewAtArgumentParser() if err != nil { @@ -1967,6 +1841,16 @@ func newShardBlockProcessor( return nil, process.ErrWrongTypeAssertion } + receiptTxInterim, err := interimProcContainer.Get(dataBlock.ReceiptBlock) + if err != nil { + return nil, err + } + + badTxInterim, err := interimProcContainer.Get(dataBlock.InvalidBlock) + if err != nil { + return nil, err + } + txTypeHandler, err := coordinator.NewTxTypeHandler(state.AddressConverter, shardCoordinator, state.AccountsAdapter) if err != nil { return nil, err @@ -1996,21 +1880,6 @@ func newShardBlockProcessor( return nil, err } - requestHandler, err := requestHandlers.NewShardResolverRequestHandler( - resolversFinder, - requestedItemsHandler, - factory.TransactionTopic, - factory.UnsignedTransactionTopic, - factory.RewardsTransactionTopic, - factory.MiniBlocksTopic, - factory.HeadersTopic, - factory.MetachainBlocksTopic, - MaxTxsToRequest, - ) - if err != nil { - return nil, err - } - rewardsTxProcessor, err := rewardTransaction.NewRewardTxProcessor( state.AccountsAdapter, state.AddressConverter, @@ -2031,6 +1900,8 @@ func newShardBlockProcessor( rewardsTxHandler, txTypeHandler, economics, + receiptTxInterim, + badTxInterim, ) if err != nil { return nil, errors.New("could not create transaction statisticsProcessor: " + err.Error()) @@ -2069,6 +1940,8 @@ func newShardBlockProcessor( } txCoordinator, err := coordinator.NewTransactionCoordinator( + core.Hasher, + core.Marshalizer, shardCoordinator, state.AccountsAdapter, data.Datapool.MiniBlocks(), @@ -2101,19 +1974,22 @@ func newShardBlockProcessor( NodesCoordinator: nodesCoordinator, SpecialAddressHandler: specialAddressHandler, Uint64Converter: core.Uint64ByteSliceConverter, - StartHeaders: genesisBlocks, RequestHandler: requestHandler, Core: coreServiceContainer, BlockChainHook: vmFactory.BlockChainHookImpl(), TxCoordinator: txCoordinator, Rounder: rounder, + EpochStartTrigger: epochStartTrigger, + HeaderValidator: headerValidator, ValidatorStatisticsProcessor: statisticsProcessor, BootStorer: bootStorer, + BlockTracker: blockTracker, } arguments := block.ArgShardProcessor{ - ArgBaseProcessor: argumentsBaseProcessor, - DataPool: data.Datapool, - TxsPoolsCleaner: txPoolsCleaner, + ArgBaseProcessor: argumentsBaseProcessor, + DataPool: data.Datapool, + TxsPoolsCleaner: txPoolsCleaner, + StateCheckpointModulus: stateCheckpointModulus, } blockProcessor, err := block.NewShardProcessor(arguments) @@ -2130,7 +2006,7 @@ func newShardBlockProcessor( } func newMetaBlockProcessor( - resolversFinder dataRetriever.ResolversFinder, + requestHandler process.RequestHandler, shardCoordinator sharding.Coordinator, nodesCoordinator sharding.NodesCoordinator, specialAddressHandler process.SpecialAddressHandler, @@ -2138,13 +2014,14 @@ func newMetaBlockProcessor( core *Core, state *State, forkDetector process.ForkDetector, - genesisBlocks map[uint32]data.HeaderHandler, coreServiceContainer serviceContainer.Core, economics *economics.EconomicsData, validatorStatisticsProcessor process.ValidatorStatisticsProcessor, rounder consensus.Rounder, + epochStartTrigger epochStart.TriggerHandler, bootStorer process.BootStorer, - requestedItemsHandler dataRetriever.RequestedItemsHandler, + headerValidator process.HeaderConstructionValidator, + blockTracker process.BlockTracker, ) (process.BlockProcessor, error) { argsHook := hooks.ArgBlockChainHook{ @@ -2222,20 +2099,6 @@ func newMetaBlockProcessor( return nil, err } - requestHandler, err := requestHandlers.NewMetaResolverRequestHandler( - resolversFinder, - 
requestedItemsHandler, - factory.ShardHeadersForMetachainTopic, - factory.MetachainBlocksTopic, - factory.TransactionTopic, - factory.UnsignedTransactionTopic, - factory.MiniBlocksTopic, - MaxTxsToRequest, - ) - if err != nil { - return nil, err - } - transactionProcessor, err := transaction.NewMetaTxProcessor( state.AccountsAdapter, state.AddressConverter, @@ -2276,6 +2139,8 @@ func newMetaBlockProcessor( } txCoordinator, err := coordinator.NewTransactionCoordinator( + core.Hasher, + core.Marshalizer, shardCoordinator, state.AccountsAdapter, data.MetaDatapool.MiniBlocks(), @@ -2308,6 +2173,27 @@ func newMetaBlockProcessor( return nil, err } + miniBlockHeaderStore := data.Store.GetStorer(dataRetriever.MiniBlockHeaderUnit) + if check.IfNil(miniBlockHeaderStore) { + return nil, errors.New("could not create pending miniblocks handler because of empty miniblock header store") + } + + metaBlocksStore := data.Store.GetStorer(dataRetriever.MetaBlockUnit) + if check.IfNil(metaBlocksStore) { + return nil, errors.New("could not create pending miniblocks handler because of empty metablock store") + } + + argsPendingMiniBlocks := &metachainEpochStart.ArgsPendingMiniBlocks{ + Marshalizer: core.Marshalizer, + Storage: miniBlockHeaderStore, + MetaBlockPool: data.MetaDatapool.Headers(), + MetaBlockStorage: metaBlocksStore, + } + pendingMiniBlocks, err := metachainEpochStart.NewPendingMiniBlocks(argsPendingMiniBlocks) + if err != nil { + return nil, err + } + argumentsBaseProcessor := block.ArgBaseProcessor{ Accounts: state.AccountsAdapter, ForkDetector: forkDetector, @@ -2318,14 +2204,16 @@ func newMetaBlockProcessor( NodesCoordinator: nodesCoordinator, SpecialAddressHandler: specialAddressHandler, Uint64Converter: core.Uint64ByteSliceConverter, - StartHeaders: genesisBlocks, RequestHandler: requestHandler, Core: coreServiceContainer, BlockChainHook: vmFactory.BlockChainHookImpl(), TxCoordinator: txCoordinator, ValidatorStatisticsProcessor: validatorStatisticsProcessor, + EpochStartTrigger: epochStartTrigger, Rounder: rounder, + HeaderValidator: headerValidator, BootStorer: bootStorer, + BlockTracker: blockTracker, } arguments := block.ArgMetaProcessor{ ArgBaseProcessor: argumentsBaseProcessor, @@ -2333,6 +2221,7 @@ func newMetaBlockProcessor( SCDataGetter: scDataGetter, SCToProtocol: smartContractToProtocol, PeerChangesHandler: smartContractToProtocol, + PendingMiniBlocks: pendingMiniBlocks, } metaProcessor, err := block.NewMetaProcessor(arguments) @@ -2369,7 +2258,8 @@ func newValidatorStatisticsProcessor( DataPool: peerDataPool, StorageService: storageService, Marshalizer: processComponents.core.Marshalizer, - Economics: processComponents.economicsData, + StakeValue: processComponents.economicsData.StakeValue(), + Rater: processComponents.rater, } validatorStatisticsProcessor, err := peer.NewValidatorStatisticsProcessor(arguments) @@ -2380,46 +2270,17 @@ func newValidatorStatisticsProcessor( return validatorStatisticsProcessor, nil } -func getCacherFromConfig(cfg config.CacheConfig) storageUnit.CacheConfig { - return storageUnit.CacheConfig{ - Size: cfg.Size, - Type: storageUnit.CacheType(cfg.Type), - Shards: cfg.Shards, - } -} - -func getDBFromConfig(cfg config.DBConfig, uniquePath string) storageUnit.DBConfig { - return storageUnit.DBConfig{ - FilePath: filepath.Join(uniquePath, cfg.FilePath), - Type: storageUnit.DBType(cfg.Type), - MaxBatchSize: cfg.MaxBatchSize, - BatchDelaySeconds: cfg.BatchDelaySeconds, - MaxOpenFiles: cfg.MaxOpenFiles, - } -} - -func getBloomFromConfig(cfg 
config.BloomFilterConfig) storageUnit.BloomConfig { - var hashFuncs []storageUnit.HasherType - if cfg.HashFunc != nil { - hashFuncs = make([]storageUnit.HasherType, 0) - for _, hf := range cfg.HashFunc { - hashFuncs = append(hashFuncs, storageUnit.HasherType(hf)) - } - } - - return storageUnit.BloomConfig{ - Size: cfg.Size, - HashFunc: hashFuncs, - } -} - func generateInMemoryAccountsAdapter( accountFactory state.AccountFactory, hasher hashing.Hasher, marshalizer marshal.Marshalizer, ) (state.AccountsAdapter, error) { + trieStorage, err := trie.NewTrieStorageManagerWithoutPruning(createMemUnit()) + if err != nil { + return nil, err + } - tr, err := trie.NewTrie(createMemUnit(), marshalizer, hasher) + tr, err := trie.NewTrie(trieStorage, marshalizer, hasher) if err != nil { return nil, err } @@ -2439,13 +2300,7 @@ func createMemUnit() storage.Storer { return nil } - persist, err := memorydb.New() - if err != nil { - log.Error("error creating persister for mem unit " + err.Error()) - return nil - } - - unit, err := storageUnit.NewStorageUnit(cache, persist) + unit, err := storageUnit.NewStorageUnit(cache, memorydb.New()) if err != nil { log.Error("error creating unit " + err.Error()) return nil @@ -2455,40 +2310,29 @@ func createMemUnit() storage.Storer { } func createMemMetaDataPool() (dataRetriever.MetaPoolsHolder, error) { - cacherCfg := storageUnit.CacheConfig{Size: 10, Type: storageUnit.LRUCache} - metaBlocks, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - if err != nil { - return nil, err - } - - cacherCfg = storageUnit.CacheConfig{Size: 10, Type: storageUnit.LRUCache, Shards: 1} + cacherCfg := storageUnit.CacheConfig{Size: 10, Type: storageUnit.LRUCache, Shards: 1} txBlockBody, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) if err != nil { return nil, err } - cacherCfg = storageUnit.CacheConfig{Size: 10, Type: storageUnit.LRUCache} - shardHeaders, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + headersCacherCfg := config.HeadersPoolConfig{MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 200} + shardHeaders, err := headersCache.NewHeadersPool(headersCacherCfg) if err != nil { return nil, err } - shardHeadersNoncesCacher, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + txPool, err := txpool.CreateTxPool(storageUnit.CacheConfig{Size: 1000, Type: storageUnit.LRUCache, Shards: 1}) if err != nil { return nil, err } - shardHeadersNonces, err := dataPool.NewNonceSyncMapCacher(shardHeadersNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) - if err != nil { - return nil, err - } - - txPool, err := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 1000, Type: storageUnit.LRUCache, Shards: 1}) + uTxPool, err := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 1000, Type: storageUnit.LRUCache, Shards: 1}) if err != nil { return nil, err } - uTxPool, err := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 1000, Type: storageUnit.LRUCache, Shards: 1}) + trieNodesCacher, err := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) if err != nil { return nil, err } @@ -2499,10 +2343,9 @@ func createMemMetaDataPool() (dataRetriever.MetaPoolsHolder, error) { } dPool, err := dataPool.NewMetaDataPool( - metaBlocks, txBlockBody, + trieNodesCacher, shardHeaders, - shardHeadersNonces, txPool, uTxPool, currTxs, diff --git a/cmd/node/factory/structsStatusHandlers.go b/cmd/node/factory/structsStatusHandlers.go index 
fc9789e051d..ac09432ea33 100644 --- a/cmd/node/factory/structsStatusHandlers.go +++ b/cmd/node/factory/structsStatusHandlers.go @@ -2,7 +2,6 @@ package factory import ( "io" - "net/http" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data/typeConverters" @@ -14,17 +13,13 @@ import ( "github.com/ElrondNetwork/elrond-go/statusHandler/persister" "github.com/ElrondNetwork/elrond-go/statusHandler/view" "github.com/ElrondNetwork/elrond-go/storage" - "github.com/pkg/errors" "github.com/urfave/cli" ) -var errPrometheusUrlNotAvailable = errors.New("prometheus URL not available") - // ArgStatusHandlers is a struct that stores arguments needed to create status handlers type ArgStatusHandlers struct { LogViewName string ServersConfigurationFileName string - PrometheusUserName string Ctx *cli.Context Marshalizer marshal.Marshalizer Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter @@ -32,8 +27,6 @@ type ArgStatusHandlers struct { // StatusHandlersInfo is struct that stores all components that are returned when status handlers are created type statusHandlersInfo struct { - PrometheusJoinUrl string - UsePrometheus bool UseTermUI bool StatusHandler core.AppStatusHandler StatusMetrics external.StatusMetricsHandler @@ -45,7 +38,6 @@ type statusHandlersInfo struct { func NewStatusHandlersFactoryArgs( logViewName string, serversConfigurationFileName string, - prometheusUserName string, ctx *cli.Context, marshalizer marshal.Marshalizer, uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter, @@ -53,7 +45,6 @@ func NewStatusHandlersFactoryArgs( return &ArgStatusHandlers{ LogViewName: logViewName, ServersConfigurationFileName: serversConfigurationFileName, - PrometheusUserName: prometheusUserName, Ctx: ctx, Marshalizer: marshalizer, Uint64ByteSliceConverter: uint64ByteSliceConverter, @@ -67,12 +58,6 @@ func CreateStatusHandlers(arguments *ArgStatusHandlers) (*statusHandlersInfo, er var err error var handler core.AppStatusHandler - prometheusJoinUrl, usePrometheus := getPrometheusJoinURLIfAvailable(arguments.Ctx, arguments.ServersConfigurationFileName, arguments.PrometheusUserName) - if usePrometheus { - prometheusStatusHandler := statusHandler.NewPrometheusStatusHandler() - appStatusHandlers = append(appStatusHandlers, prometheusStatusHandler) - } - presenterStatusHandler := createStatusHandlerPresenter() useTermui := !arguments.Ctx.GlobalBool(arguments.LogViewName) @@ -122,8 +107,6 @@ func CreateStatusHandlers(arguments *ArgStatusHandlers) (*statusHandlersInfo, er statusHandlersInfo := new(statusHandlersInfo) statusHandlersInfo.StatusHandler = handler - statusHandlersInfo.PrometheusJoinUrl = prometheusJoinUrl - statusHandlersInfo.UsePrometheus = usePrometheus statusHandlersInfo.UseTermUI = useTermui statusHandlersInfo.StatusMetrics = statusMetrics statusHandlersInfo.PersistentHandler = persistentHandler @@ -140,35 +123,6 @@ func (shi *statusHandlersInfo) UpdateStorerAndMetricsForPersistentHandler(store return nil } -func getPrometheusJoinURLIfAvailable(ctx *cli.Context, serversConfigurationFileName string, userPrometheusName string) (string, bool) { - prometheusURLAvailable := true - prometheusJoinUrl, err := getPrometheusJoinURL(ctx.GlobalString(serversConfigurationFileName)) - if err != nil || prometheusJoinUrl == "" { - prometheusURLAvailable = false - } - usePrometheus := ctx.GlobalBool(userPrometheusName) && prometheusURLAvailable - - return prometheusJoinUrl, usePrometheus -} - -func getPrometheusJoinURL(serversConfigurationFileName string) (string, 
error) { - serversConfig, err := core.LoadServersPConfig(serversConfigurationFileName) - if err != nil { - return "", err - } - baseURL := serversConfig.Prometheus.PrometheusBaseURL - statusURL := baseURL + serversConfig.Prometheus.StatusRoute - resp, err := http.Get(statusURL) - if err != nil { - return "", err - } - if resp.StatusCode == http.StatusNotFound { - return "", errPrometheusUrlNotAvailable - } - joinURL := baseURL + serversConfig.Prometheus.JoinRoute - return joinURL, nil -} - // CreateStatusHandlerPresenter will return an instance of PresenterStatusHandler func createStatusHandlerPresenter() view.Presenter { presenterStatusHandlerFactory := factoryViews.NewPresenterFactory() diff --git a/cmd/node/main.go b/cmd/node/main.go index 40ea67bda4d..de968c95df8 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -30,6 +30,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/display" + "github.com/ElrondNetwork/elrond-go/epochStart/notifier" "github.com/ElrondNetwork/elrond-go/facade" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/logger" @@ -41,20 +42,24 @@ import ( "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/ElrondNetwork/elrond-go/process/factory/metachain" "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/rating" "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/pathmanager" "github.com/ElrondNetwork/elrond-go/storage/timecache" "github.com/google/gops/agent" "github.com/urfave/cli" ) const ( - defaultStatsPath = "stats" - defaultDBPath = "db" - defaultEpochString = "Epoch" - defaultShardString = "Shard" - metachainShardName = "metachain" + defaultStatsPath = "stats" + defaultDBPath = "db" + defaultEpochString = "Epoch" + defaultStaticDbString = "Static" + defaultShardString = "Shard" + metachainShardName = "metachain" ) var ( @@ -200,13 +205,6 @@ VERSION: Usage: "Start the rest API engine in debug mode", } - // networkID defines the version of the network. If set, will override the same parameter from config.toml - networkID = cli.StringFlag{ - Name: "network-id", - Usage: "The network version, overriding the one from config.toml", - Value: "", - } - // nodeDisplayName defines the friendly name used by a node in the public monitoring tools. If set, will override // the NodeDisplayName from config.toml nodeDisplayName = cli.StringFlag{ @@ -215,12 +213,6 @@ VERSION: Value: "", } - // usePrometheus joins the node for prometheus monitoring if set - usePrometheus = cli.BoolFlag{ - Name: "use-prometheus", - Usage: "Will make the node available for prometheus and grafana monitoring", - } - //useLogView is used when termui interface is not needed. 
useLogView = cli.BoolFlag{ Name: "use-log-view", @@ -278,6 +270,23 @@ VERSION: Value: "", } + isNodefullArchive = cli.BoolFlag{ + Name: "full-archive", + Usage: "If set, the node won't remove any DB", + } + + numEpochsToSave = cli.Uint64Flag{ + Name: "num-epochs-to-keep", + Usage: "This represents the number of epochs which are kept in the databases", + Value: uint64(2), + } + + numActivePersisters = cli.Uint64Flag{ + Name: "num-active-persisters", + Usage: "This represents the number of persisters which are kept open at a given moment", + Value: uint64(2), + } + rm *statistics.ResourceMonitor ) @@ -289,6 +298,9 @@ var dbIndexer indexer.Indexer // params depending on the type of node we are starting var coreServiceContainer serviceContainer.Core +// TODO: this will be calculated from storage or fetched from network +var currentEpoch = uint32(0) + // appVersion should be populated at build time using ldflags // Usage examples: // linux/mac: @@ -327,18 +339,19 @@ func main() { initialNodesSkPemFile, gopsEn, serversConfigurationFile, - networkID, nodeDisplayName, restApiInterface, restApiDebug, disableAnsiColor, logLevel, - usePrometheus, useLogView, bootstrapRoundIndex, enableTxIndexing, workingDirectory, destinationShardAsObserver, + isNodefullArchive, + numEpochsToSave, + numActivePersisters, } app.Authors = []cli.Author{ { @@ -483,10 +496,6 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { generalConfig.GeneralSettings.DestinationShardAsObserver = ctx.GlobalString(destinationShardAsObserver.Name) } - if ctx.IsSet(networkID.Name) { - generalConfig.GeneralSettings.NetworkID = ctx.GlobalString(networkID.Name) - } - if ctx.IsSet(nodeDisplayName.Name) { preferencesConfig.Preferences.NodeDisplayName = ctx.GlobalString(nodeDisplayName.Name) } @@ -508,40 +517,78 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } log.Trace("working directory", "path", workingDir) - var shardId = "metachain" - if shardCoordinator.SelfId() != sharding.MetachainShardId { - shardId = fmt.Sprintf("%d", shardCoordinator.SelfId()) - } + var shardId = core.GetShardIdString(shardCoordinator.SelfId()) + + pathTemplateForPruningStorer := filepath.Join( + workingDir, + defaultDBPath, + nodesConfig.ChainID, + fmt.Sprintf("%s_%s", defaultEpochString, core.PathEpochPlaceholder), + fmt.Sprintf("%s_%s", defaultShardString, core.PathShardPlaceholder), + core.PathIdentifierPlaceholder) - uniqueDBFolder := filepath.Join( + pathTemplateForStaticStorer := filepath.Join( workingDir, defaultDBPath, nodesConfig.ChainID, - fmt.Sprintf("%s_%d", defaultEpochString, 0), - fmt.Sprintf("%s_%s", defaultShardString, shardId)) + defaultStaticDbString, + fmt.Sprintf("%s_%s", defaultShardString, core.PathShardPlaceholder), + core.PathIdentifierPlaceholder) + + pathManager, err := pathmanager.NewPathManager(pathTemplateForPruningStorer, pathTemplateForStaticStorer) + if err != nil { + return err + } storageCleanup := ctx.GlobalBool(storageCleanup.Name) if storageCleanup { - log.Trace("cleaning storage", "path", uniqueDBFolder) - err = os.RemoveAll(uniqueDBFolder) + dbPath := filepath.Join( + workingDir, + defaultDBPath) + log.Trace("cleaning storage", "path", dbPath) + err = os.RemoveAll(dbPath) if err != nil { return err } } log.Trace("creating core components") - coreArgs := factory.NewCoreComponentsFactoryArgs(generalConfig, uniqueDBFolder, []byte(nodesConfig.ChainID)) + coreArgs := factory.NewCoreComponentsFactoryArgs(generalConfig, pathManager, shardId, []byte(nodesConfig.ChainID)) coreComponents,
err := factory.CoreComponentsFactory(coreArgs) if err != nil { return err } + log.Trace("creating economics data components") + economicsData, err := economics.NewEconomicsData(economicsConfig) + if err != nil { + return err + } + + rater, err := rating.NewBlockSigningRater(economicsData.RatingsData()) + if err != nil { + return err + } + log.Trace("creating nodes coordinator") + if ctx.IsSet(isNodefullArchive.Name) { + generalConfig.StoragePruning.FullArchive = ctx.GlobalBool(isNodefullArchive.Name) + } + if ctx.IsSet(numEpochsToSave.Name) { + generalConfig.StoragePruning.NumEpochsToKeep = ctx.GlobalUint64(numEpochsToSave.Name) + } + if ctx.IsSet(numActivePersisters.Name) { + generalConfig.StoragePruning.NumActivePersisters = ctx.GlobalUint64(numActivePersisters.Name) + } + + epochStartNotifier := notifier.NewEpochStartSubscriptionHandler() + // TODO: use epochStartNotifier in nodes coordinator nodesCoordinator, err := createNodesCoordinator( nodesConfig, generalConfig.GeneralSettings, pubKey, - coreComponents.Hasher) + coreComponents.Hasher, + rater) if err != nil { return err } @@ -552,7 +599,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { genesisConfig, shardCoordinator, coreComponents, - uniqueDBFolder, + pathManager, ) stateComponents, err := factory.StateComponentsFactory(stateArgs) if err != nil { @@ -560,12 +607,12 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } log.Trace("initializing stats file") - err = initStatsFileMonitor(generalConfig, pubKey, log, workingDir) + err = initStatsFileMonitor(generalConfig, pubKey, log, workingDir, pathManager, shardId) if err != nil { return err } - handlersArgs := factory.NewStatusHandlersFactoryArgs(useLogView.Name, serversConfigurationFile.Name, usePrometheus.Name, ctx, coreComponents.Marshalizer, coreComponents.Uint64ByteSliceConverter) + handlersArgs := factory.NewStatusHandlersFactoryArgs(useLogView.Name, serversConfigurationFile.Name, ctx, coreComponents.Marshalizer, coreComponents.Uint64ByteSliceConverter) statusHandlersInfo, err := factory.CreateStatusHandlers(handlersArgs) if err != nil { return err @@ -577,7 +624,7 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { metrics.InitMetrics(coreComponents.StatusHandler, pubKey, nodeType, shardCoordinator, nodesConfig, version, economicsConfig) log.Trace("creating data components") - dataArgs := factory.NewDataComponentsFactoryArgs(generalConfig, shardCoordinator, coreComponents, uniqueDBFolder) + dataArgs := factory.NewDataComponentsFactoryArgs(generalConfig, shardCoordinator, coreComponents, pathManager, epochStartNotifier, currentEpoch) dataComponents, err := factory.DataComponentsFactory(dataArgs) if err != nil { return err @@ -663,12 +710,6 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { } } - log.Trace("creating economics data components") - economicsData, err := economics.NewEconomicsData(economicsConfig) - if err != nil { - return err - } - gasScheduleConfigurationFileName := ctx.GlobalString(gasScheduleConfigurationFile.Name) gasSchedule, err := core.LoadGasScheduleConfig(gasScheduleConfigurationFileName) if err != nil { @@ -695,6 +736,12 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { networkComponents, coreServiceContainer, requestedItemsHandler, + epochStartNotifier, + &generalConfig.EpochStartConfig, + 0, + rater, + generalConfig.Marshalizer.SizeCheckDelta, + generalConfig.StateTrieConfig.RoundsModulus, ) processComponents, 
err := factory.ProcessComponentsFactory(processArgs) if err != nil { @@ -787,11 +834,8 @@ func startNode(ctx *cli.Context, log logger.Logger, version string) error { ef := facade.NewElrondNodeFacade(currentNode, apiResolver, restAPIServerDebugMode) efConfig := &config.FacadeConfig{ - RestApiInterface: ctx.GlobalString(restApiInterface.Name), - PprofEnabled: ctx.GlobalBool(profileMode.Name), - Prometheus: statusHandlersInfo.UsePrometheus, - PrometheusJoinURL: statusHandlersInfo.PrometheusJoinUrl, - PrometheusJobName: generalConfig.GeneralSettings.NetworkID, + RestApiInterface: ctx.GlobalString(restApiInterface.Name), + PprofEnabled: ctx.GlobalBool(profileMode.Name), } ef.SetSyncer(syncer) @@ -935,6 +979,7 @@ func createNodesCoordinator( settingsConfig config.GeneralSettingsConfig, pubKey crypto.PublicKey, hasher hashing.Hasher, + rater sharding.RaterHandler, ) (sharding.NodesCoordinator, error) { shardId, err := getShardIdFromNodePubKey(pubKey, nodesConfig) @@ -978,12 +1023,20 @@ func createNodesCoordinator( Nodes: initValidators, SelfPublicKey: pubKeyBytes, } - nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) + + baseNodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) if err != nil { return nil, err } - return nodesCoordinator, nil + //TODO fix IndexHashedNodesCoordinatorWithRater as to perform better when expanding eligible list based on rating + // do not forget to return nodesCoordinator from this function instead of baseNodesCoordinator + //nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinatorWithRater(baseNodesCoordinator, rater) + //if err != nil { + // return nil, err + //} + + return baseNodesCoordinator, nil } func processDestinationShardAsObserver(settingsConfig config.GeneralSettingsConfig) (uint32, error) { @@ -1074,7 +1127,7 @@ func createNode( nd, err := node.NewNode( node.WithMessenger(network.NetMessenger), node.WithHasher(core.Hasher), - node.WithMarshalizer(core.Marshalizer), + node.WithMarshalizer(core.Marshalizer, config.Marshalizer.SizeCheckDelta), node.WithTxFeeHandler(economicsData), node.WithInitialNodesPubKeys(crypto.InitialPubKeys), node.WithAddressConverter(state.AddressConverter), @@ -1107,12 +1160,14 @@ func createNode( node.WithBootstrapRoundIndex(bootstrapRoundIndex), node.WithAppStatusHandler(core.StatusHandler), node.WithIndexer(indexer), + node.WithEpochStartTrigger(process.EpochStartTrigger), node.WithBlackListHandler(process.BlackListHandler), node.WithBootStorer(process.BootStorer), node.WithRequestedItemsHandler(requestedItemsHandler), node.WithHeaderSigVerifier(process.HeaderSigVerifier), node.WithValidatorStatistics(process.ValidatorsStatistics), node.WithChainID(core.ChainID), + node.WithBlockTracker(process.BlockTracker), ) if err != nil { return nil, errors.New("error creating node: " + err.Error()) @@ -1145,8 +1200,15 @@ func createNode( return nd, nil } -func initStatsFileMonitor(config *config.Config, pubKey crypto.PublicKey, log logger.Logger, - workingDir string) error { +func initStatsFileMonitor( + config *config.Config, + pubKey crypto.PublicKey, + log logger.Logger, + workingDir string, + pathManager storage.PathManagerHandler, + shardId string, +) error { + publicKey, err := pubKey.ToByteArray() if err != nil { return err @@ -1158,7 +1220,7 @@ func initStatsFileMonitor(config *config.Config, pubKey crypto.PublicKey, log lo if err != nil { return err } - err = startStatisticsMonitor(statsFile, config.ResourceStats, log) + err = 
startStatisticsMonitor(statsFile, config, log, pathManager, shardId) if err != nil { return err } @@ -1186,12 +1248,18 @@ func setServiceContainer(shardCoordinator sharding.Coordinator, tpsBenchmark *st return errors.New("could not init core service container") } -func startStatisticsMonitor(file *os.File, config config.ResourceStatsConfig, log logger.Logger) error { - if !config.Enabled { +func startStatisticsMonitor( + file *os.File, + generalConfig *config.Config, + log logger.Logger, + pathManager storage.PathManagerHandler, + shardId string, +) error { + if !generalConfig.ResourceStats.Enabled { return nil } - if config.RefreshIntervalInSec < 1 { + if generalConfig.ResourceStats.RefreshIntervalInSec < 1 { return errors.New("invalid RefreshIntervalInSec in section [ResourceStats]. Should be an integer higher than 1") } @@ -1202,9 +1270,9 @@ func startStatisticsMonitor(file *os.File, config config.ResourceStatsConfig, lo go func() { for { - err = resMon.SaveStatistics() + err = resMon.SaveStatistics(generalConfig, pathManager, shardId) log.LogIfError(err) - time.Sleep(time.Second * time.Duration(config.RefreshIntervalInSec)) + time.Sleep(time.Second * time.Duration(generalConfig.ResourceStats.RefreshIntervalInSec)) } }() diff --git a/config/config.go b/config/config.go index 23d2f79abfc..69b2f5f4a4d 100644 --- a/config/config.go +++ b/config/config.go @@ -7,6 +7,12 @@ type CacheConfig struct { Shards uint32 `json:"shards"` } +//HeadersPoolConfig will map the headers cache configuration +type HeadersPoolConfig struct { + MaxHeadersPerShard int + NumElementsToRemoveOnEviction int +} + // DBConfig will map the json db configuration type DBConfig struct { FilePath string `json:"file"` @@ -46,6 +52,12 @@ type TypeConfig struct { Type string `json:"type"` } +// MarshalizerConfig holds the marshalizer related configuration +type MarshalizerConfig struct { + Type string `json:"type"` + SizeCheckDelta uint32 `json:"sizeCheckDelta"` +} + // NTPConfig will hold the configuration for NTP queries type NTPConfig struct { Hosts []string @@ -54,9 +66,22 @@ type NTPConfig struct { Version int } +// EvictionWaitingListConfig will hold the configuration for the EvictionWaitingList +type EvictionWaitingListConfig struct { + Size uint `json:"size"` + DB DBConfig `json:"db"` +} + +// EpochStartConfig will hold the configuration of EpochStart settings +type EpochStartConfig struct { + MinRoundsBetweenEpochs int64 + RoundsPerEpoch int64 +} + // Config will hold the entire application configuration parameters type Config struct { MiniBlocksStorage StorageConfig + MiniBlockHeadersStorage StorageConfig PeerBlockBodyStorage StorageConfig BlockHeaderStorage StorageConfig TxStorage StorageConfig @@ -73,36 +98,34 @@ type Config struct { AccountsTrieStorage StorageConfig PeerAccountsTrieStorage StorageConfig + TrieSnapshotDB DBConfig + EvictionWaitingList EvictionWaitingListConfig + StateTrieConfig StateTrieConfig BadBlocksCache CacheConfig TxBlockBodyDataPool CacheConfig - StateBlockBodyDataPool CacheConfig PeerBlockBodyDataPool CacheConfig - BlockHeaderDataPool CacheConfig - BlockHeaderNoncesDataPool CacheConfig TxDataPool CacheConfig UnsignedTransactionDataPool CacheConfig RewardTransactionDataPool CacheConfig - MetaBlockBodyDataPool CacheConfig - - MiniBlockHeaderHashesDataPool CacheConfig - ShardHeadersDataPool CacheConfig - MetaHeaderNoncesDataPool CacheConfig - - Logger LoggerConfig - Address AddressConfig - BLSPublicKey AddressConfig - Hasher TypeConfig - MultisigHasher TypeConfig - Marshalizer TypeConfig + 
TrieNodesDataPool CacheConfig + EpochStartConfig EpochStartConfig + Logger LoggerConfig + Address AddressConfig + BLSPublicKey AddressConfig + Hasher TypeConfig + MultisigHasher TypeConfig + Marshalizer MarshalizerConfig ResourceStats ResourceStatsConfig Heartbeat HeartbeatConfig GeneralSettings GeneralSettingsConfig Consensus TypeConfig Explorer ExplorerConfig + StoragePruning StoragePruningConfig - NTPConfig NTPConfig + NTPConfig NTPConfig + HeadersPoolConfig HeadersPoolConfig } // NodeConfig will hold basic p2p settings @@ -112,6 +135,14 @@ type NodeConfig struct { TargetPeerCount int } +// StoragePruningConfig will hold settings related to storage pruning +type StoragePruningConfig struct { + Enabled bool + FullArchive bool + NumEpochsToKeep uint64 + NumActivePersisters uint64 +} + // KadDhtPeerDiscoveryConfig will hold the kad-dht discovery config settings type KadDhtPeerDiscoveryConfig struct { Enabled bool @@ -144,7 +175,6 @@ type HeartbeatConfig struct { // GeneralSettingsConfig will hold the general settings for a node type GeneralSettingsConfig struct { DestinationShardAsObserver string - NetworkID string StatusPollingIntervalSec int } @@ -157,14 +187,6 @@ type ExplorerConfig struct { // ServersConfig will hold all the confidential settings for servers type ServersConfig struct { ElasticSearch ElasticSearchConfig - Prometheus PrometheusConfig -} - -// PrometheusConfig will hold configuration for prometheus, such as the join URL -type PrometheusConfig struct { - PrometheusBaseURL string - JoinRoute string - StatusRoute string } // ElasticSearchConfig will hold the configuration for the elastic search @@ -175,9 +197,12 @@ type ElasticSearchConfig struct { // FacadeConfig will hold different configuration option that will be passed to the main ElrondFacade type FacadeConfig struct { - RestApiInterface string - PprofEnabled bool - Prometheus bool - PrometheusJoinURL string - PrometheusJobName string + RestApiInterface string + PprofEnabled bool +} + +// StateTrieConfig will hold information about state trie +type StateTrieConfig struct { + RoundsModulus uint + PruningEnabled bool } diff --git a/config/economicsConfig.go b/config/economicsConfig.go index 8e1aeb884e4..0266d50a812 100644 --- a/config/economicsConfig.go +++ b/config/economicsConfig.go @@ -17,9 +17,11 @@ type RewardsSettings struct { // FeeSettings will hold economics fee settings type FeeSettings struct { - MaxGasLimitPerBlock string - MinGasPrice string - MinGasLimit string + MaxGasLimitPerBlock string + GasPerDataByte string + DataLimitForBaseCalc string + MinGasPrice string + MinGasLimit string } // ValidatorSettings will hold the validator settings @@ -28,10 +30,28 @@ type ValidatorSettings struct { UnBoundPeriod string } +// RatingSettings will hold rating settings +type RatingSettings struct { + StartRating uint32 + MaxRating uint32 + MinRating uint32 + ProposerIncreaseRatingStep uint32 + ProposerDecreaseRatingStep uint32 + ValidatorIncreaseRatingStep uint32 + ValidatorDecreaseRatingStep uint32 +} + +//RatingValue will hold different rating options with increase and decrease steps +type RatingValue struct { + Name string + Value int32 +} + // ConfigEconomics will hold economics config type ConfigEconomics struct { EconomicsAddresses EconomicsAddresses RewardsSettings RewardsSettings FeeSettings FeeSettings ValidatorSettings ValidatorSettings + RatingSettings RatingSettings } diff --git a/consensus/broadcast/metaChainMessenger.go b/consensus/broadcast/metaChainMessenger.go index 49dd8e170c0..d7759420ebc 100644 ---
a/consensus/broadcast/metaChainMessenger.go +++ b/consensus/broadcast/metaChainMessenger.go @@ -106,13 +106,6 @@ func (mcm *metaChainMessenger) BroadcastBlock(blockBody data.BodyHandler, header return nil } -// BroadcastShardHeader will send on meta-to-shards topic the header -func (mcm *metaChainMessenger) BroadcastShardHeader(header data.HeaderHandler) error { - // meta chain does not need to broadcast separately the header, as it have no body and BroadcastBlock does all - // the job for it, but this method is created to satisfy the BroadcastMessenger interface - return nil -} - // BroadcastHeader will send on metachain blocks topic the header func (mcm *metaChainMessenger) BroadcastHeader(header data.HeaderHandler) error { if header == nil || header.IsInterfaceNil() { diff --git a/consensus/broadcast/metaChainMessenger_test.go b/consensus/broadcast/metaChainMessenger_test.go index 7ae4de59805..0ecc1d4ea18 100644 --- a/consensus/broadcast/metaChainMessenger_test.go +++ b/consensus/broadcast/metaChainMessenger_test.go @@ -180,25 +180,6 @@ func TestMetaChainMessenger_BroadcastBlockShouldWork(t *testing.T) { assert.Nil(t, err) } -func TestMetaChainMessenger_BroadcastShardHeaderShouldWork(t *testing.T) { - marshalizerMock := &mock.MarshalizerMock{} - messengerMock := &mock.MessengerStub{} - privateKeyMock := &mock.PrivateKeyMock{} - shardCoordinatorMock := &mock.ShardCoordinatorMock{} - singleSignerMock := &mock.SingleSignerMock{} - - mcm, _ := broadcast.NewMetaChainMessenger( - marshalizerMock, - messengerMock, - privateKeyMock, - shardCoordinatorMock, - singleSignerMock, - ) - - err := mcm.BroadcastShardHeader(nil) - assert.Nil(t, err) -} - func TestMetaChainMessenger_BroadcastMiniBlocksShouldWork(t *testing.T) { marshalizerMock := &mock.MarshalizerMock{} messengerMock := &mock.MessengerStub{} diff --git a/consensus/broadcast/shardChainMessenger.go b/consensus/broadcast/shardChainMessenger.go index 6eddf2052fa..40601bcf0d6 100644 --- a/consensus/broadcast/shardChainMessenger.go +++ b/consensus/broadcast/shardChainMessenger.go @@ -100,33 +100,15 @@ func (scm *shardChainMessenger) BroadcastBlock(blockBody data.BodyHandler, heade return err } + headerIdentifier := scm.shardCoordinator.CommunicationIdentifier(sharding.MetachainShardId) selfIdentifier := scm.shardCoordinator.CommunicationIdentifier(scm.shardCoordinator.SelfId()) - go scm.messenger.Broadcast(factory.HeadersTopic+selfIdentifier, msgHeader) + go scm.messenger.Broadcast(factory.ShardBlocksTopic+headerIdentifier, msgHeader) go scm.messenger.Broadcast(factory.MiniBlocksTopic+selfIdentifier, msgBlockBody) return nil } -// BroadcastShardHeader will send on shard headers for metachain topic the header -func (scm *shardChainMessenger) BroadcastShardHeader(header data.HeaderHandler) error { - if header == nil || header.IsInterfaceNil() { - return spos.ErrNilHeader - } - - msgHeader, err := scm.marshalizer.Marshal(header) - if err != nil { - return err - } - - shardHeaderForMetachainTopic := factory.ShardHeadersForMetachainTopic + - scm.shardCoordinator.CommunicationIdentifier(sharding.MetachainShardId) - - go scm.messenger.Broadcast(shardHeaderForMetachainTopic, msgHeader) - - return nil -} - // BroadcastHeader will send on in-shard headers topic the header func (scm *shardChainMessenger) BroadcastHeader(header data.HeaderHandler) error { if header == nil || header.IsInterfaceNil() { @@ -138,9 +120,8 @@ func (scm *shardChainMessenger) BroadcastHeader(header data.HeaderHandler) error return err } - selfIdentifier := 
scm.shardCoordinator.CommunicationIdentifier(scm.shardCoordinator.SelfId()) - - go scm.messenger.Broadcast(factory.HeadersTopic+selfIdentifier, msgHeader) + shardIdentifier := scm.shardCoordinator.CommunicationIdentifier(sharding.MetachainShardId) + go scm.messenger.Broadcast(factory.ShardBlocksTopic+shardIdentifier, msgHeader) return nil } diff --git a/consensus/broadcast/shardChainMessenger_test.go b/consensus/broadcast/shardChainMessenger_test.go index ff351ce425f..380d0f8c371 100644 --- a/consensus/broadcast/shardChainMessenger_test.go +++ b/consensus/broadcast/shardChainMessenger_test.go @@ -201,67 +201,6 @@ func TestShardChainMessenger_BroadcastBlockShouldWork(t *testing.T) { assert.Nil(t, err) } -func TestShardChainMessenger_BroadcastShardHeaderShouldErrNilHeader(t *testing.T) { - marshalizerMock := &mock.MarshalizerMock{} - messengerMock := &mock.MessengerStub{} - privateKeyMock := &mock.PrivateKeyMock{} - shardCoordinatorMock := &mock.ShardCoordinatorMock{} - singleSignerMock := &mock.SingleSignerMock{} - - scm, _ := broadcast.NewShardChainMessenger( - marshalizerMock, - messengerMock, - privateKeyMock, - shardCoordinatorMock, - singleSignerMock, - ) - - err := scm.BroadcastShardHeader(nil) - assert.Equal(t, spos.ErrNilHeader, err) -} - -func TestShardChainMessenger_BroadcastShardHeaderShouldErrMockMarshalizer(t *testing.T) { - marshalizerMock := &mock.MarshalizerMock{} - messengerMock := &mock.MessengerStub{} - privateKeyMock := &mock.PrivateKeyMock{} - shardCoordinatorMock := &mock.ShardCoordinatorMock{} - singleSignerMock := &mock.SingleSignerMock{} - marshalizerMock.Fail = true - - scm, _ := broadcast.NewShardChainMessenger( - marshalizerMock, - messengerMock, - privateKeyMock, - shardCoordinatorMock, - singleSignerMock, - ) - - err := scm.BroadcastShardHeader(&block.Header{}) - assert.Equal(t, mock.ErrMockMarshalizer, err) -} - -func TestShardChainMessenger_BroadcastShardHeaderShouldWork(t *testing.T) { - marshalizerMock := &mock.MarshalizerMock{} - messengerMock := &mock.MessengerStub{ - BroadcastCalled: func(topic string, buff []byte) { - }, - } - privateKeyMock := &mock.PrivateKeyMock{} - shardCoordinatorMock := &mock.ShardCoordinatorMock{} - singleSignerMock := &mock.SingleSignerMock{} - - scm, _ := broadcast.NewShardChainMessenger( - marshalizerMock, - messengerMock, - privateKeyMock, - shardCoordinatorMock, - singleSignerMock, - ) - - err := scm.BroadcastShardHeader(&block.Header{}) - assert.Nil(t, err) -} - func TestShardChainMessenger_BroadcastMiniBlocksShouldBeDone(t *testing.T) { var channelCalled chan bool channelCalled = make(chan bool, 100) diff --git a/consensus/chronology/chronology.go b/consensus/chronology/chronology.go index 445ca4bea1f..aa689a0b023 100644 --- a/consensus/chronology/chronology.go +++ b/consensus/chronology/chronology.go @@ -7,6 +7,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/display" "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/ntp" @@ -53,7 +54,8 @@ func NewChronology( genesisTime: genesisTime, rounder: rounder, syncTimer: syncTimer, - appStatusHandler: statusHandler.NewNilStatusHandler()} + appStatusHandler: statusHandler.NewNilStatusHandler(), + } chr.subroundId = srBeforeStartRound @@ -68,11 +70,10 @@ func checkNewChronologyParams( syncTimer ntp.SyncTimer, ) error { - if rounder == nil || rounder.IsInterfaceNil() { + if check.IfNil(rounder) { return 
ErrNilRounder } - - if syncTimer == nil || syncTimer.IsInterfaceNil() { + if check.IfNil(syncTimer) { return ErrNilSyncTimer } diff --git a/consensus/chronology/chronology_test.go b/consensus/chronology/chronology_test.go index 4a096d4aed1..131a56c90d6 100644 --- a/consensus/chronology/chronology_test.go +++ b/consensus/chronology/chronology_test.go @@ -34,7 +34,8 @@ func TestChronology_NewChronologyNilRounderShouldFail(t *testing.T) { chr, err := chronology.NewChronology( genesisTime, nil, - syncTimerMock) + syncTimerMock, + ) assert.Nil(t, chr) assert.Equal(t, err, chronology.ErrNilRounder) @@ -47,7 +48,8 @@ func TestChronology_NewChronologyNilSyncerShouldFail(t *testing.T) { chr, err := chronology.NewChronology( genesisTime, rounderMock, - nil) + nil, + ) assert.Nil(t, chr) assert.Equal(t, err, chronology.ErrNilSyncTimer) @@ -61,7 +63,8 @@ func TestChronology_NewChronologyShouldWork(t *testing.T) { chr, err := chronology.NewChronology( genesisTime, rounderMock, - syncTimerMock) + syncTimerMock, + ) assert.NotNil(t, chr) assert.Nil(t, err) @@ -75,7 +78,8 @@ func TestChronology_AddSubroundShouldWork(t *testing.T) { chr, _ := chronology.NewChronology( genesisTime, rounderMock, - syncTimerMock) + syncTimerMock, + ) chr.AddSubround(initSubroundHandlerMock()) chr.AddSubround(initSubroundHandlerMock()) @@ -92,7 +96,8 @@ func TestChronology_RemoveAllSubroundsShouldReturnEmptySubroundHandlersArray(t * chr, _ := chronology.NewChronology( genesisTime, rounderMock, - syncTimerMock) + syncTimerMock, + ) chr.AddSubround(initSubroundHandlerMock()) chr.AddSubround(initSubroundHandlerMock()) @@ -114,7 +119,8 @@ func TestChronology_StartRoundShouldReturnWhenRoundIndexIsNegative(t *testing.T) chr, _ := chronology.NewChronology( genesisTime, rounderMock, - syncTimerMock) + syncTimerMock, + ) srm := initSubroundHandlerMock() chr.AddSubround(srm) @@ -132,7 +138,8 @@ func TestChronology_StartRoundShouldReturnWhenLoadSubroundHandlerReturnsNil(t *t chr, _ := chronology.NewChronology( genesisTime, rounderMock, - syncTimerMock) + syncTimerMock, + ) initSubroundHandlerMock() chr.StartRound() @@ -149,7 +156,8 @@ func TestChronology_StartRoundShouldReturnWhenDoWorkReturnsFalse(t *testing.T) { chr, _ := chronology.NewChronology( genesisTime, rounderMock, - syncTimerMock) + syncTimerMock, + ) srm := initSubroundHandlerMock() chr.AddSubround(srm) @@ -168,7 +176,8 @@ func TestChronology_StartRoundShouldWork(t *testing.T) { chr, _ := chronology.NewChronology( genesisTime, rounderMock, - syncTimerMock) + syncTimerMock, + ) srm := initSubroundHandlerMock() srm.DoWorkCalled = func(rounder consensus.Rounder) bool { @@ -189,7 +198,8 @@ func TestChronology_UpdateRoundShouldInitRound(t *testing.T) { chr, _ := chronology.NewChronology( genesisTime, rounderMock, - syncTimerMock) + syncTimerMock, + ) srm := initSubroundHandlerMock() chr.AddSubround(srm) @@ -206,7 +216,8 @@ func TestChronology_LoadSubrounderShouldReturnNilWhenSubroundHandlerNotExists(t chr, _ := chronology.NewChronology( genesisTime, rounderMock, - syncTimerMock) + syncTimerMock, + ) assert.Nil(t, chr.LoadSubroundHandler(0)) } @@ -219,7 +230,8 @@ func TestChronology_LoadSubrounderShouldReturnNilWhenIndexIsOutOfBound(t *testin chr, _ := chronology.NewChronology( genesisTime, rounderMock, - syncTimerMock) + syncTimerMock, + ) chr.AddSubround(initSubroundHandlerMock()) chr.SetSubroundHandlers(make([]consensus.SubroundHandler, 0)) @@ -234,7 +246,8 @@ func TestChronology_InitRoundShouldNotSetSubroundWhenRoundIndexIsNegative(t *tes chr, _ := chronology.NewChronology( 
syncTimerMock.CurrentTime(), rounderMock, - syncTimerMock) + syncTimerMock, + ) chr.AddSubround(initSubroundHandlerMock()) rounderMock.IndexCalled = func() int64 { @@ -253,7 +266,8 @@ func TestChronology_InitRoundShouldSetSubroundWhenRoundIndexIsPositive(t *testin chr, _ := chronology.NewChronology( syncTimerMock.CurrentTime(), rounderMock, - syncTimerMock) + syncTimerMock, + ) sr := initSubroundHandlerMock() chr.AddSubround(sr) @@ -269,7 +283,8 @@ func TestChronology_StartRoundShouldNotUpdateRoundWhenCurrentRoundIsNotFinished( chr, _ := chronology.NewChronology( syncTimerMock.CurrentTime(), rounderMock, - syncTimerMock) + syncTimerMock, + ) chr.SetSubroundId(0) chr.StartRound() @@ -285,7 +300,8 @@ func TestChronology_StartRoundShouldUpdateRoundWhenCurrentRoundIsFinished(t *tes chr, _ := chronology.NewChronology( syncTimerMock.CurrentTime(), rounderMock, - syncTimerMock) + syncTimerMock, + ) chr.SetSubroundId(-1) chr.StartRound() @@ -301,7 +317,8 @@ func TestChronology_SetAppStatusHandlerWithNilValueShouldErr(t *testing.T) { chr, _ := chronology.NewChronology( syncTimerMock.CurrentTime(), rounderMock, - syncTimerMock) + syncTimerMock, + ) err := chr.SetAppStatusHandler(nil) assert.Equal(t, err, chronology.ErrNilAppStatusHandler) @@ -315,7 +332,8 @@ func TestChronology_SetAppStatusHandlerWithOkValueShouldPass(t *testing.T) { chr, _ := chronology.NewChronology( syncTimerMock.CurrentTime(), rounderMock, - syncTimerMock) + syncTimerMock, + ) err := chr.SetAppStatusHandler(&mock.AppStatusHandlerMock{}) @@ -331,7 +349,8 @@ func TestChronology_CheckIfStatusHandlerWorks(t *testing.T) { chr, _ := chronology.NewChronology( syncTimerMock.CurrentTime(), rounderMock, - syncTimerMock) + syncTimerMock, + ) err := chr.SetAppStatusHandler(&mock.AppStatusHandlerStub{ SetUInt64ValueHandler: func(key string, value uint64) { diff --git a/consensus/interface.go b/consensus/interface.go index 512e817d0a6..94c29bac2ef 100644 --- a/consensus/interface.go +++ b/consensus/interface.go @@ -56,7 +56,6 @@ type SposFactory interface { type BroadcastMessenger interface { BroadcastBlock(data.BodyHandler, data.HeaderHandler) error BroadcastHeader(data.HeaderHandler) error - BroadcastShardHeader(data.HeaderHandler) error BroadcastMiniBlocks(map[uint32][]byte) error BroadcastTransactions(map[string][][]byte) error BroadcastConsensusMessage(*Message) error diff --git a/consensus/message.go b/consensus/message.go index 0b693379e23..998445a85a6 100644 --- a/consensus/message.go +++ b/consensus/message.go @@ -10,7 +10,6 @@ type Message struct { PubKey []byte Signature []byte MsgType int - TimeStamp uint64 RoundIndex int64 ChainID []byte } @@ -22,7 +21,6 @@ func NewConsensusMessage( pubKey []byte, sig []byte, msg int, - tms uint64, roundIndex int64, chainID []byte, ) *Message { @@ -33,7 +31,6 @@ func NewConsensusMessage( PubKey: pubKey, Signature: sig, MsgType: msg, - TimeStamp: tms, RoundIndex: roundIndex, ChainID: chainID, } diff --git a/consensus/message_test.go b/consensus/message_test.go index d4d67cdd340..63ea94ffaac 100644 --- a/consensus/message_test.go +++ b/consensus/message_test.go @@ -17,7 +17,6 @@ func TestConsensusMessage_NewConsensusMessageShouldWork(t *testing.T) { nil, -1, 0, - 0, []byte("chain ID"), ) diff --git a/consensus/mock/blockProcessorMock.go b/consensus/mock/blockProcessorMock.go index 8a40990f9dc..4988a18a237 100644 --- a/consensus/mock/blockProcessorMock.go +++ b/consensus/mock/blockProcessorMock.go @@ -5,6 +5,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/data" + 
"github.com/ElrondNetwork/elrond-go/process/block/processedMb" ) // BlockProcessorMock mocks the implementation for a blockProcessor @@ -17,7 +18,7 @@ type BlockProcessorMock struct { CreateBlockCalled func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.BodyHandler, error) RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error SetOnRequestTransactionCalled func(f func(destShardID uint32, txHash []byte)) - ApplyBodyToHeaderCalled func(hdr data.HeaderHandler, body data.BodyHandler) error + ApplyBodyToHeaderCalled func(hdr data.HeaderHandler, body data.BodyHandler) (data.BodyHandler, error) MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) DecodeBlockBodyCalled func(dta []byte) data.BodyHandler DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler @@ -29,7 +30,7 @@ func (blProcMock *BlockProcessorMock) SetNumProcessedObj(numObj uint64) { } -func (blProcMock *BlockProcessorMock) ApplyProcessedMiniBlocks(miniBlocks map[string]map[string]struct{}) { +func (blProcMock *BlockProcessorMock) ApplyProcessedMiniBlocks(miniBlocks *processedMb.ProcessedMiniBlockTracker) { } @@ -73,7 +74,7 @@ func (blProcMock *BlockProcessorMock) RestoreBlockIntoPools(header data.HeaderHa return blProcMock.RestoreBlockIntoPoolsCalled(header, body) } -func (blProcMock BlockProcessorMock) ApplyBodyToHeader(hdr data.HeaderHandler, body data.BodyHandler) error { +func (blProcMock BlockProcessorMock) ApplyBodyToHeader(hdr data.HeaderHandler, body data.BodyHandler) (data.BodyHandler, error) { return blProcMock.ApplyBodyToHeaderCalled(hdr, body) } diff --git a/consensus/mock/broadcastMessangerMock.go b/consensus/mock/broadcastMessangerMock.go index 781e4ba1c5e..02fac7d99a6 100644 --- a/consensus/mock/broadcastMessangerMock.go +++ b/consensus/mock/broadcastMessangerMock.go @@ -8,7 +8,6 @@ import ( type BroadcastMessengerMock struct { BroadcastBlockCalled func(data.BodyHandler, data.HeaderHandler) error BroadcastHeaderCalled func(data.HeaderHandler) error - BroadcastShardHeaderCalled func(data.HeaderHandler) error BroadcastMiniBlocksCalled func(map[uint32][]byte) error BroadcastTransactionsCalled func(map[string][][]byte) error BroadcastConsensusMessageCalled func(*consensus.Message) error @@ -21,13 +20,6 @@ func (bmm *BroadcastMessengerMock) BroadcastBlock(bodyHandler data.BodyHandler, return nil } -func (bmm *BroadcastMessengerMock) BroadcastShardHeader(headerHandler data.HeaderHandler) error { - if bmm.BroadcastShardHeaderCalled != nil { - return bmm.BroadcastShardHeaderCalled(headerHandler) - } - return nil -} - func (bmm *BroadcastMessengerMock) BroadcastMiniBlocks(miniBlocks map[uint32][]byte) error { if bmm.BroadcastMiniBlocksCalled != nil { return bmm.BroadcastMiniBlocksCalled(miniBlocks) diff --git a/consensus/mock/chronologyHandlerMock.go b/consensus/mock/chronologyHandlerMock.go index a9dd1e7934e..275852de93b 100644 --- a/consensus/mock/chronologyHandlerMock.go +++ b/consensus/mock/chronologyHandlerMock.go @@ -8,6 +8,14 @@ type ChronologyHandlerMock struct { AddSubroundCalled func(consensus.SubroundHandler) RemoveAllSubroundsCalled func() StartRoundCalled func() + EpochCalled func() uint32 +} + +func (chrm *ChronologyHandlerMock) Epoch() uint32 { + if chrm.EpochCalled != nil { + return chrm.EpochCalled() + } + return 0 } func (chrm *ChronologyHandlerMock) AddSubround(subroundHandler consensus.SubroundHandler) { diff --git a/consensus/mock/forkDetectorMock.go 
b/consensus/mock/forkDetectorMock.go index a7253c5cd22..bc2fc72b5fb 100644 --- a/consensus/mock/forkDetectorMock.go +++ b/consensus/mock/forkDetectorMock.go @@ -6,26 +6,28 @@ import ( ) type ForkDetectorMock struct { - AddHeaderCalled func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error - RemoveHeadersCalled func(nonce uint64, hash []byte) + AddHeaderCalled func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error + RemoveHeaderCalled func(nonce uint64, hash []byte) CheckForkCalled func() *process.ForkInfo GetHighestFinalBlockNonceCalled func() uint64 + GetHighestFinalBlockHashCalled func() []byte ProbableHighestNonceCalled func() uint64 - ResetProbableHighestNonceCalled func() ResetForkCalled func() GetNotarizedHeaderHashCalled func(nonce uint64) []byte + SetRollBackNonceCalled func(nonce uint64) + RestoreToGenesisCalled func() } -func (f *ForkDetectorMock) RestoreFinalCheckPointToGenesis() { - +func (fdm *ForkDetectorMock) RestoreToGenesis() { + fdm.RestoreToGenesisCalled() } -func (fdm *ForkDetectorMock) AddHeader(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error { - return fdm.AddHeaderCalled(header, hash, state, finalHeaders, finalHeadersHashes, isNotarizedShardStuck) +func (fdm *ForkDetectorMock) AddHeader(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { + return fdm.AddHeaderCalled(header, hash, state, selfNotarizedHeaders, selfNotarizedHeadersHashes) } -func (fdm *ForkDetectorMock) RemoveHeaders(nonce uint64, hash []byte) { - fdm.RemoveHeadersCalled(nonce, hash) +func (fdm *ForkDetectorMock) RemoveHeader(nonce uint64, hash []byte) { + fdm.RemoveHeaderCalled(nonce, hash) } func (fdm *ForkDetectorMock) CheckFork() *process.ForkInfo { @@ -36,12 +38,18 @@ func (fdm *ForkDetectorMock) GetHighestFinalBlockNonce() uint64 { return fdm.GetHighestFinalBlockNonceCalled() } +func (fdm *ForkDetectorMock) GetHighestFinalBlockHash() []byte { + return fdm.GetHighestFinalBlockHashCalled() +} + func (fdm *ForkDetectorMock) ProbableHighestNonce() uint64 { return fdm.ProbableHighestNonceCalled() } -func (fdm *ForkDetectorMock) ResetProbableHighestNonce() { - fdm.ResetProbableHighestNonceCalled() +func (fdm *ForkDetectorMock) SetRollBackNonce(nonce uint64) { + if fdm.SetRollBackNonceCalled != nil { + fdm.SetRollBackNonceCalled(nonce) + } } func (fdm *ForkDetectorMock) ResetFork() { @@ -54,8 +62,5 @@ func (fdm *ForkDetectorMock) GetNotarizedHeaderHash(nonce uint64) []byte { // IsInterfaceNil returns true if there is no value under the interface func (fdm *ForkDetectorMock) IsInterfaceNil() bool { - if fdm == nil { - return true - } - return false + return fdm == nil } diff --git a/consensus/mock/hasherMock.go b/consensus/mock/hasherMock.go index 22edb389f3a..bd7ed68ca2a 100644 --- a/consensus/mock/hasherMock.go +++ b/consensus/mock/hasherMock.go @@ -11,7 +11,7 @@ type HasherMock struct { // Compute will output the SHA's equivalent of the input string func (sha HasherMock) Compute(s string) []byte { h := sha256.New() - h.Write([]byte(s)) + _, _ = h.Write([]byte(s)) return h.Sum(nil) } @@ -30,8 +30,5 @@ func (HasherMock) Size() int { // IsInterfaceNil 
returns true if there is no value under the interface func (sha HasherMock) IsInterfaceNil() bool { - if &sha == nil { - return true - } return false } diff --git a/consensus/mock/headerHandlerStub.go b/consensus/mock/headerHandlerStub.go index 0be8d80348f..b27a23e50dc 100644 --- a/consensus/mock/headerHandlerStub.go +++ b/consensus/mock/headerHandlerStub.go @@ -17,6 +17,17 @@ type HeaderHandlerStub struct { CheckChainIDCalled func(reference []byte) error } +func (hhs *HeaderHandlerStub) GetReceiptsHash() []byte { + return []byte("receipt") +} + +func (hhs *HeaderHandlerStub) SetShardID(_ uint32) { +} + +func (hhs *HeaderHandlerStub) IsStartOfEpochBlock() bool { + return false +} + func (hhs *HeaderHandlerStub) Clone() data.HeaderHandler { return hhs.CloneCalled() } diff --git a/consensus/mock/mockTestInitializer.go b/consensus/mock/mockTestInitializer.go index 2dece715101..3b5bcaf6f47 100644 --- a/consensus/mock/mockTestInitializer.go +++ b/consensus/mock/mockTestInitializer.go @@ -28,9 +28,9 @@ func InitBlockProcessorMock() *BlockProcessorMock { blockProcessorMock.ProcessBlockCalled = func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return nil } - blockProcessorMock.ApplyBodyToHeaderCalled = func(hdr data.HeaderHandler, body data.BodyHandler) error { + blockProcessorMock.ApplyBodyToHeaderCalled = func(hdr data.HeaderHandler, body data.BodyHandler) (data.BodyHandler, error) { hdr.SetRootHash([]byte{}) - return nil + return body, nil } blockProcessorMock.DecodeBlockBodyCalled = func(dta []byte) data.BodyHandler { return block.Body{} diff --git a/consensus/mock/peerProcessorMock.go b/consensus/mock/peerProcessorMock.go index a6527db5df0..b238e624d8c 100644 --- a/consensus/mock/peerProcessorMock.go +++ b/consensus/mock/peerProcessorMock.go @@ -7,8 +7,8 @@ import ( type ValidatorStatisticsProcessorMock struct { LoadInitialStateCalled func(in []*sharding.InitialNode) error - UpdatePeerStateCalled func(header, previousHeader data.HeaderHandler) error - IsInterfaceNilCalled func() bool + UpdatePeerStateCalled func(header, previousHeader data.HeaderHandler) error + IsInterfaceNilCalled func() bool } func (pm *ValidatorStatisticsProcessorMock) LoadInitialState(in []*sharding.InitialNode) error { diff --git a/consensus/spos/bls/subroundEndRound.go b/consensus/spos/bls/subroundEndRound.go index 6f169d759b7..1424b52135b 100644 --- a/consensus/spos/bls/subroundEndRound.go +++ b/consensus/spos/bls/subroundEndRound.go @@ -116,12 +116,6 @@ func (sr *subroundEndRound) doEndRoundJob() bool { debugError("BroadcastBlock", err) } - // broadcast header to metachain - err = sr.BroadcastMessenger().BroadcastShardHeader(sr.Header) - if err != nil { - debugError("BroadcastShardHeader", err) - } - log.Debug("step 3: BlockBody and Header has been committed and broadcast", "type", "spos/bls", "time [s]", sr.SyncTimer().FormattedCurrentTime()) diff --git a/consensus/spos/bls/subroundSignature.go b/consensus/spos/bls/subroundSignature.go index 50f88e4151e..57b93594aca 100644 --- a/consensus/spos/bls/subroundSignature.go +++ b/consensus/spos/bls/subroundSignature.go @@ -81,7 +81,6 @@ func (sr *subroundSignature) doSignatureJob() bool { []byte(sr.SelfPubKey()), nil, int(MtSignature), - uint64(sr.Rounder().TimeStamp().Unix()), sr.Rounder().Index(), sr.ChainID(), ) diff --git a/consensus/spos/bls/subroundSignature_test.go b/consensus/spos/bls/subroundSignature_test.go index cacdb0929c6..725b64b06a1 100644 --- 
a/consensus/spos/bls/subroundSignature_test.go +++ b/consensus/spos/bls/subroundSignature_test.go @@ -290,7 +290,6 @@ func TestSubroundSignature_ReceivedSignature(t *testing.T) { []byte(sr.ConsensusGroup()[1]), []byte("sig"), int(bls.MtSignature), - uint64(sr.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -417,7 +416,6 @@ func TestSubroundSignature_ReceivedSignatureReturnFalseWhenConsensusDataIsNotEqu []byte(sr.ConsensusGroup()[0]), []byte("sig"), int(bls.MtSignature), - uint64(sr.Rounder().TimeStamp().Unix()), 0, chainID, ) diff --git a/consensus/spos/bn/subroundBitmap.go b/consensus/spos/bn/subroundBitmap.go index 1ca14586076..ee062bb4ca9 100644 --- a/consensus/spos/bn/subroundBitmap.go +++ b/consensus/spos/bn/subroundBitmap.go @@ -65,7 +65,6 @@ func (sr *subroundBitmap) doBitmapJob() bool { []byte(sr.SelfPubKey()), nil, int(MtBitmap), - uint64(sr.Rounder().TimeStamp().Unix()), sr.Rounder().Index(), sr.ChainID(), ) diff --git a/consensus/spos/bn/subroundBitmap_test.go b/consensus/spos/bn/subroundBitmap_test.go index d508565a2e9..758d014b68a 100644 --- a/consensus/spos/bn/subroundBitmap_test.go +++ b/consensus/spos/bn/subroundBitmap_test.go @@ -265,7 +265,6 @@ func TestSubroundBitmap_ReceivedBitmap(t *testing.T) { []byte(sr.ConsensusGroup()[0]), []byte("sig"), int(bn.MtCommitmentHash), - uint64(sr.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -404,7 +403,6 @@ func TestSubroundBitmap_ReceivedBitmapReturnFalseWhenConsensusDataIsNotEqual(t * []byte(sr.ConsensusGroup()[0]), []byte("sig"), int(bn.MtBitmap), - uint64(sr.Rounder().TimeStamp().Unix()), 0, chainID, ) diff --git a/consensus/spos/bn/subroundCommitment.go b/consensus/spos/bn/subroundCommitment.go index e93f312bf71..259cd2e7f9e 100644 --- a/consensus/spos/bn/subroundCommitment.go +++ b/consensus/spos/bn/subroundCommitment.go @@ -85,7 +85,6 @@ func (sr *subroundCommitment) doCommitmentJob() bool { []byte(sr.SelfPubKey()), nil, int(MtCommitment), - uint64(sr.Rounder().TimeStamp().Unix()), sr.Rounder().Index(), sr.ChainID(), ) diff --git a/consensus/spos/bn/subroundCommitmentHash.go b/consensus/spos/bn/subroundCommitmentHash.go index 1b666e901e0..124441a0aa9 100644 --- a/consensus/spos/bn/subroundCommitmentHash.go +++ b/consensus/spos/bn/subroundCommitmentHash.go @@ -65,7 +65,6 @@ func (sr *subroundCommitmentHash) doCommitmentHashJob() bool { []byte(sr.SelfPubKey()), nil, int(MtCommitmentHash), - uint64(sr.Rounder().TimeStamp().Unix()), sr.Rounder().Index(), sr.ChainID(), ) diff --git a/consensus/spos/bn/subroundCommitmentHash_test.go b/consensus/spos/bn/subroundCommitmentHash_test.go index 87e33d8ee8d..90aec698a82 100644 --- a/consensus/spos/bn/subroundCommitmentHash_test.go +++ b/consensus/spos/bn/subroundCommitmentHash_test.go @@ -287,7 +287,6 @@ func TestSubroundCommitmentHash_ReceivedCommitmentHash(t *testing.T) { []byte(sr.ConsensusGroup()[0]), nil, int(bn.MtCommitmentHash), - uint64(sr.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -527,7 +526,6 @@ func TestSubroundCommitmentHash_ReceivedCommitmentHashReturnFalseWhenConsensusDa []byte(sr.ConsensusGroup()[0]), []byte("sig"), int(bn.MtCommitmentHash), - uint64(sr.Rounder().TimeStamp().Unix()), 0, chainID, ) diff --git a/consensus/spos/bn/subroundCommitment_test.go b/consensus/spos/bn/subroundCommitment_test.go index 37d04ea336e..1c76b7beb56 100644 --- a/consensus/spos/bn/subroundCommitment_test.go +++ b/consensus/spos/bn/subroundCommitment_test.go @@ -255,7 +255,6 @@ func TestSubroundCommitment_ReceivedCommitment(t *testing.T) { []byte(sr.ConsensusGroup()[0]), []byte("sig"), 
int(bn.MtCommitment), - uint64(sr.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -371,7 +370,6 @@ func TestSubroundCommitment_ReceivedCommitmentReturnFalseWhenConsensusDataIsNotE []byte(sr.ConsensusGroup()[0]), []byte("sig"), int(bn.MtCommitment), - uint64(sr.Rounder().TimeStamp().Unix()), 0, chainID, ) diff --git a/consensus/spos/bn/subroundEndRound.go b/consensus/spos/bn/subroundEndRound.go index 9c0fa9b39ed..6c7c20914b3 100644 --- a/consensus/spos/bn/subroundEndRound.go +++ b/consensus/spos/bn/subroundEndRound.go @@ -101,12 +101,6 @@ func (sr *subroundEndRound) doEndRoundJob() bool { debugError("BroadcastBlock", err) } - // broadcast header to metachain - err = sr.BroadcastMessenger().BroadcastShardHeader(sr.Header) - if err != nil { - debugError("BroadcastShardHeader", err) - } - log.Debug("step 6: TxBlockBody and Header has been committed and broadcast", "type", "spos/bn", "time [s]", sr.SyncTimer().FormattedCurrentTime()) diff --git a/consensus/spos/bn/subroundSignature.go b/consensus/spos/bn/subroundSignature.go index 379a518a41d..e2e19304f31 100644 --- a/consensus/spos/bn/subroundSignature.go +++ b/consensus/spos/bn/subroundSignature.go @@ -92,7 +92,6 @@ func (sr *subroundSignature) doSignatureJob() bool { []byte(sr.SelfPubKey()), nil, int(MtSignature), - uint64(sr.Rounder().TimeStamp().Unix()), sr.Rounder().Index(), sr.ChainID(), ) diff --git a/consensus/spos/bn/subroundSignature_test.go b/consensus/spos/bn/subroundSignature_test.go index 671c6a0fdb3..d24bc672557 100644 --- a/consensus/spos/bn/subroundSignature_test.go +++ b/consensus/spos/bn/subroundSignature_test.go @@ -404,7 +404,6 @@ func TestSubroundSignature_ReceivedSignature(t *testing.T) { []byte(sr.ConsensusGroup()[0]), []byte("sig"), int(bn.MtSignature), - uint64(sr.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -520,7 +519,6 @@ func TestSubroundSignature_ReceivedSignatureReturnFalseWhenConsensusDataIsNotEqu []byte(sr.ConsensusGroup()[0]), []byte("sig"), int(bn.MtCommitmentHash), - uint64(sr.Rounder().TimeStamp().Unix()), 0, chainID, ) diff --git a/consensus/spos/commonSubround/subroundBlock.go b/consensus/spos/commonSubround/subroundBlock.go index 76de5837cde..14ba11bd0ee 100644 --- a/consensus/spos/commonSubround/subroundBlock.go +++ b/consensus/spos/commonSubround/subroundBlock.go @@ -94,7 +94,7 @@ func (sr *SubroundBlock) doBlockJob() bool { return false } - err = sr.BlockProcessor().ApplyBodyToHeader(hdr, body) + body, err = sr.BlockProcessor().ApplyBodyToHeader(hdr, body) if err != nil { log.Debug("ApplyBodyToHeader", "error", err.Error()) return false @@ -147,7 +147,6 @@ func (sr *SubroundBlock) sendBlockBody(blockBody data.BodyHandler) bool { []byte(sr.SelfPubKey()), nil, sr.mtBlockBody, - uint64(sr.Rounder().TimeStamp().Unix()), sr.Rounder().Index(), sr.ChainID(), ) @@ -182,7 +181,6 @@ func (sr *SubroundBlock) sendBlockHeader(hdr data.HeaderHandler) bool { []byte(sr.SelfPubKey()), nil, sr.mtBlockHeader, - uint64(sr.Rounder().TimeStamp().Unix()), sr.Rounder().Index(), sr.ChainID(), ) @@ -225,6 +223,7 @@ func (sr *SubroundBlock) createHeader() (data.HeaderHandler, error) { return nil, err } + hdr.SetShardID(sr.ShardCoordinator().SelfId()) hdr.SetRound(uint64(sr.Rounder().Index())) hdr.SetTimeStamp(uint64(sr.Rounder().TimeStamp().Unix())) hdr.SetPrevRandSeed(prevRandSeed) diff --git a/consensus/spos/commonSubround/subroundBlock_test.go b/consensus/spos/commonSubround/subroundBlock_test.go index 7623d5216e5..8385883964c 100644 --- a/consensus/spos/commonSubround/subroundBlock_test.go +++ 
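// A minimal sketch of how callers are expected to use the reworked ApplyBodyToHeader, which
// now returns the resulting body together with the error instead of only mutating the header
// (see doBlockJob above). The local applier interface and the helper name are assumptions
// introduced purely for illustration; data is the elrond-go data package already imported above.
type bodyToHeaderApplier interface {
	ApplyBodyToHeader(hdr data.HeaderHandler, body data.BodyHandler) (data.BodyHandler, error)
}

func applyBodySketch(bp bodyToHeaderApplier, hdr data.HeaderHandler, body data.BodyHandler) (data.BodyHandler, error) {
	newBody, err := bp.ApplyBodyToHeader(hdr, body)
	if err != nil {
		return nil, err
	}

	// continue working with the returned body, not with the one passed in
	return newBody, nil
}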
b/consensus/spos/commonSubround/subroundBlock_test.go @@ -326,7 +326,6 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { []byte(sr.ConsensusGroup()[0]), []byte("sig"), MtBlockBody, - uint64(sr.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -358,7 +357,6 @@ func TestSubroundBlock_ReceivedBlock(t *testing.T) { []byte(sr.ConsensusGroup()[0]), []byte("sig"), MtBlockHeader, - uint64(sr.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -406,7 +404,6 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenBodyAndHeaderAre []byte(sr.ConsensusGroup()[0]), []byte("sig"), MtBlockBody, - uint64(sr.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -432,7 +429,6 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockFail []byte(sr.ConsensusGroup()[0]), []byte("sig"), MtBlockBody, - uint64(sr.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -454,7 +450,6 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnFalseWhenProcessBlockRetu []byte(sr.ConsensusGroup()[0]), []byte("sig"), MtBlockBody, - uint64(sr.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -482,7 +477,6 @@ func TestSubroundBlock_ProcessReceivedBlockShouldReturnTrue(t *testing.T) { []byte(sr.ConsensusGroup()[0]), []byte("sig"), MtBlockBody, - uint64(sr.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -664,7 +658,7 @@ func TestSubroundBlock_CreateHeaderNilCurrentHeader(t *testing.T) { _ = sr.BlockChain().SetCurrentBlockHeader(nil) header, _ := sr.CreateHeader() body, _ := sr.CreateBody(header) - _ = sr.BlockProcessor().ApplyBodyToHeader(header, body) + _, _ = sr.BlockProcessor().ApplyBodyToHeader(header, body) _ = sr.SendBlockBody(body) _ = sr.SendBlockHeader(header) @@ -694,7 +688,7 @@ func TestSubroundBlock_CreateHeaderNotNilCurrentHeader(t *testing.T) { header, _ := sr.CreateHeader() body, _ := sr.CreateBody(header) - _ = sr.BlockProcessor().ApplyBodyToHeader(header, body) + _, _ = sr.BlockProcessor().ApplyBodyToHeader(header, body) _ = sr.SendBlockBody(body) _ = sr.SendBlockHeader(header) @@ -729,12 +723,12 @@ func TestSubroundBlock_CreateHeaderMultipleMiniBlocks(t *testing.T) { }, } bp := mock.InitBlockProcessorMock() - bp.ApplyBodyToHeaderCalled = func(header data.HeaderHandler, body data.BodyHandler) error { + bp.ApplyBodyToHeaderCalled = func(header data.HeaderHandler, body data.BodyHandler) (data.BodyHandler, error) { shardHeader, _ := header.(*block.Header) shardHeader.MiniBlockHeaders = mbHeaders shardHeader.RootHash = []byte{} - return nil + return body, nil } container := mock.InitConsensusCore() sr := *initSubroundBlockWithBlockProcessor(bp, container) @@ -742,7 +736,7 @@ func TestSubroundBlock_CreateHeaderMultipleMiniBlocks(t *testing.T) { header, _ := sr.CreateHeader() body, _ := sr.CreateBody(header) - _ = sr.BlockProcessor().ApplyBodyToHeader(header, body) + _, _ = sr.BlockProcessor().ApplyBodyToHeader(header, body) _ = sr.SendBlockBody(body) _ = sr.SendBlockHeader(header) @@ -765,8 +759,8 @@ func TestSubroundBlock_CreateHeaderMultipleMiniBlocks(t *testing.T) { func TestSubroundBlock_CreateHeaderNilMiniBlocks(t *testing.T) { expectedErr := errors.New("nil mini blocks") bp := mock.InitBlockProcessorMock() - bp.ApplyBodyToHeaderCalled = func(header data.HeaderHandler, body data.BodyHandler) error { - return expectedErr + bp.ApplyBodyToHeaderCalled = func(header data.HeaderHandler, body data.BodyHandler) (data.BodyHandler, error) { + return body, expectedErr } container := mock.InitConsensusCore() sr := *initSubroundBlockWithBlockProcessor(bp, container) @@ -776,7 +770,7 @@ func 
TestSubroundBlock_CreateHeaderNilMiniBlocks(t *testing.T) { header, _ := sr.CreateHeader() body, _ := sr.CreateBody(header) - err := sr.BlockProcessor().ApplyBodyToHeader(header, body) + _, err := sr.BlockProcessor().ApplyBodyToHeader(header, body) assert.Equal(t, expectedErr, err) } diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index 782471e2a6c..9f7e89cb443 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -253,7 +253,5 @@ func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error sr.SetConsensusGroup(nextConsensusGroup) - sr.BlockProcessor().SetConsensusData(randomSeed, uint64(sr.RoundIndex), currentHeader.GetEpoch(), shardId) - return nil } diff --git a/consensus/spos/consensusState.go b/consensus/spos/consensusState.go index 999f951b78e..df12c85e899 100644 --- a/consensus/spos/consensusState.go +++ b/consensus/spos/consensusState.go @@ -222,7 +222,11 @@ func (cns *ConsensusState) GenerateBitmap(subroundId int) []byte { // generate bitmap according to set commitment hashes sizeConsensus := len(cns.ConsensusGroup()) - bitmap := make([]byte, sizeConsensus/8+1) + bitmapSize := sizeConsensus / 8 + if sizeConsensus%8 != 0 { + bitmapSize++ + } + bitmap := make([]byte, bitmapSize) for i := 0; i < sizeConsensus; i++ { pubKey := cns.ConsensusGroup()[i] diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index f3213cee0bd..df6bcd4d457 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -12,6 +12,7 @@ import ( "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/display" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/ntp" "github.com/ElrondNetwork/elrond-go/p2p" @@ -247,12 +248,10 @@ func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToS return err } if !bytes.Equal(cnsDta.ChainID, wrk.chainID) { - err := fmt.Errorf("%w received: %s, wanted %s", + return fmt.Errorf("%w : received: %s, wanted: %s", ErrInvalidChainID, hex.EncodeToString(cnsDta.ChainID), - hex.EncodeToString(wrk.chainID), - ) - return err + hex.EncodeToString(wrk.chainID)) } msgType := consensus.MessageType(cnsDta.MsgType) @@ -266,7 +265,9 @@ func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToS senderOK := wrk.consensusState.IsNodeInEligibleList(string(cnsDta.PubKey)) if !senderOK { - return ErrSenderNotOk + return fmt.Errorf("%w : node with public key %s is not in eligible list", + ErrSenderNotOk, + display.DisplayByteSlice(cnsDta.PubKey)) } if wrk.consensusState.RoundIndex > cnsDta.RoundIndex { @@ -282,32 +283,19 @@ func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToS sigVerifErr := wrk.checkSignature(cnsDta) if sigVerifErr != nil { - return ErrInvalidSignature + return fmt.Errorf("%w : verify consensus data signature failed: %s", + ErrInvalidSignature, + sigVerifErr.Error()) } if wrk.consensusService.IsMessageWithBlockHeader(msgType) { headerHash := cnsDta.BlockHeaderHash header := wrk.blockProcessor.DecodeBlockHeader(cnsDta.SubRoundData) - err = wrk.headerSigVerifier.VerifyRandSeed(header) - if err != nil { - return err - } - isHeaderInvalid := check.IfNil(header) || headerHash == nil if isHeaderInvalid { - return ErrInvalidHeader - } - - err := header.CheckChainID(wrk.chainID) - if err != nil { - 
return err - } - - err = wrk.forkDetector.AddHeader(header, headerHash, process.BHProposed, nil, nil, false) - if err != nil { - log.Trace("add header in forkdetector", "error", err.Error()) - return err + return fmt.Errorf("%w : received header from consensus topic is invalid", + ErrInvalidHeader) } log.Debug("received proposed block", @@ -317,6 +305,26 @@ func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToS "nonce", header.GetNonce(), "prev hash", header.GetPrevHash(), ) + + err = header.CheckChainID(wrk.chainID) + if err != nil { + return fmt.Errorf("%w : chain ID in received header from consensus topic is invalid", + err) + } + + err = wrk.headerSigVerifier.VerifyRandSeed(header) + if err != nil { + return fmt.Errorf("%w : verify rand seed for received header from consensus topic failed", + err) + } + + err = wrk.forkDetector.AddHeader(header, headerHash, process.BHProposed, nil, nil) + if err != nil { + log.Debug("add received header from consensus topic to fork detector failed", + "error", err.Error()) + //we should not return error here because the other peers connected to self might need this message + //to advance the consensus + } } if wrk.consensusService.IsMessageWithSignature(msgType) { diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index 3e5caeb012d..75db78f789b 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -32,7 +32,7 @@ func initWorker() *spos.Worker { broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() forkDetectorMock := &mock.ForkDetectorMock{} - forkDetectorMock.AddHeaderCalled = func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalsHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error { + forkDetectorMock.AddHeaderCalled = func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { return nil } keyGeneratorMock, _, _ := mock.InitKeys() @@ -660,7 +660,7 @@ func TestWorker_ProcessReceivedMessageWrongHeaderShouldErr(t *testing.T) { broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() forkDetectorMock := &mock.ForkDetectorMock{} - forkDetectorMock.AddHeaderCalled = func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalsHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error { + forkDetectorMock.AddHeaderCalled = func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { return nil } keyGeneratorMock, _, _ := mock.InitKeys() @@ -712,14 +712,13 @@ func TestWorker_ProcessReceivedMessageWrongHeaderShouldErr(t *testing.T) { []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtBlockHeader), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) buff, _ := wrk.Marshalizer().Marshal(cnsMsg) time.Sleep(time.Second) err := wrk.ProcessReceivedMessage(&mock.P2PMessageMock{DataField: buff}, nil) - assert.Equal(t, process.ErrRandSeedDoesNotMatch, err) + assert.True(t, errors.Is(err, spos.ErrInvalidHeader)) } func TestWorker_ReceivedSyncStateShouldNotSendOnChannelWhenInputIsFalse(t *testing.T) { @@ -816,7 +815,6 @@ func TestWorker_ProcessReceivedMessageTxBlockBodyShouldRetNil(t *testing.T) { []byte(wrk.ConsensusState().ConsensusGroup()[0]), 
[]byte("sig"), int(bn.MtBlockBody), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -841,7 +839,6 @@ func TestWorker_ProcessReceivedMessageHeaderShouldRetNil(t *testing.T) { []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtUnknown), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -883,7 +880,6 @@ func TestWorker_ProcessReceivedMessageNodeNotInEligibleListShouldErr(t *testing. []byte("X"), []byte("sig"), int(bn.MtBlockBody), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -892,7 +888,7 @@ func TestWorker_ProcessReceivedMessageNodeNotInEligibleListShouldErr(t *testing. time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) - assert.Equal(t, spos.ErrSenderNotOk, err) + assert.True(t, errors.Is(err, spos.ErrSenderNotOk)) } func TestWorker_ProcessReceivedMessageInconsistentChainIDInConsensusMessageShouldErr(t *testing.T) { @@ -907,7 +903,6 @@ func TestWorker_ProcessReceivedMessageInconsistentChainIDInConsensusMessageShoul []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtBlockBody), - uint64(wrk.Rounder().TimeStamp().Unix()), 1, []byte("inconsistent chain ID"), ) @@ -928,7 +923,6 @@ func TestWorker_ProcessReceivedMessageMessageIsForPastRoundShouldErr(t *testing. []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtBlockBody), - uint64(wrk.Rounder().TimeStamp().Unix()), -1, chainID, ) @@ -951,7 +945,6 @@ func TestWorker_ProcessReceivedMessageInvalidSignatureShouldErr(t *testing.T) { []byte(wrk.ConsensusState().ConsensusGroup()[0]), nil, int(bn.MtBlockBody), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -960,7 +953,7 @@ func TestWorker_ProcessReceivedMessageInvalidSignatureShouldErr(t *testing.T) { time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bn.MtBlockBody])) - assert.Equal(t, spos.ErrInvalidSignature, err) + assert.True(t, errors.Is(err, spos.ErrInvalidSignature)) } func TestWorker_ProcessReceivedMessageReceivedMessageIsFromSelfShouldRetNilAndNotProcess(t *testing.T) { @@ -974,7 +967,6 @@ func TestWorker_ProcessReceivedMessageReceivedMessageIsFromSelfShouldRetNilAndNo []byte(wrk.ConsensusState().SelfPubKey()), []byte("sig"), int(bn.MtBlockBody), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -998,7 +990,6 @@ func TestWorker_ProcessReceivedMessageWhenRoundIsCanceledShouldRetNilAndNotProce []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtBlockBody), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -1038,7 +1029,6 @@ func TestWorker_ProcessReceivedMessageWrongChainIDInProposedBlockShouldError(t * []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtBlockHeader), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -1076,7 +1066,6 @@ func TestWorker_ProcessReceivedMessageOkValsShouldWork(t *testing.T) { []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtBlockHeader), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -1098,7 +1087,6 @@ func TestWorker_CheckSelfStateShouldErrMessageFromItself(t *testing.T) { nil, 0, 0, - 0, chainID, ) err := wrk.CheckSelfState(cnsMsg) @@ -1116,7 +1104,6 @@ func TestWorker_CheckSelfStateShouldErrRoundCanceled(t *testing.T) { nil, 0, 0, - 0, chainID, ) err := wrk.CheckSelfState(cnsMsg) @@ -1133,7 +1120,6 @@ func TestWorker_CheckSelfStateShouldNotErr(t *testing.T) { nil, 0, 0, - 0, chainID, ) err := wrk.CheckSelfState(cnsMsg) @@ -1159,7 +1145,6 @@ func 
TestWorker_CheckSignatureShouldReturnErrNilPublicKey(t *testing.T) { nil, []byte("sig"), int(bn.MtBlockBody), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -1179,7 +1164,6 @@ func TestWorker_CheckSignatureShouldReturnErrNilSignature(t *testing.T) { []byte(wrk.ConsensusState().ConsensusGroup()[0]), nil, int(bn.MtBlockBody), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -1205,7 +1189,6 @@ func TestWorker_CheckSignatureShouldReturnPublicKeyFromByteArrayErr(t *testing.T []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtBlockBody), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -1228,7 +1211,6 @@ func TestWorker_CheckSignatureShouldReturnMarshalizerErr(t *testing.T) { []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtBlockBody), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -1248,7 +1230,6 @@ func TestWorker_CheckSignatureShouldReturnNilErr(t *testing.T) { []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtBlockBody), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -1269,7 +1250,6 @@ func TestWorker_ExecuteMessagesShouldNotExecuteWhenConsensusDataIsNil(t *testing []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtBlockBody), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -1294,7 +1274,6 @@ func TestWorker_ExecuteMessagesShouldNotExecuteWhenMessageIsForOtherRound(t *tes []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtBlockBody), - uint64(wrk.Rounder().TimeStamp().Unix()), -1, chainID, ) @@ -1319,7 +1298,6 @@ func TestWorker_ExecuteBlockBodyMessagesShouldNotExecuteWhenStartRoundIsNotFinis []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtBlockBody), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -1344,7 +1322,6 @@ func TestWorker_ExecuteBlockHeaderMessagesShouldNotExecuteWhenStartRoundIsNotFin []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtBlockHeader), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -1369,7 +1346,6 @@ func TestWorker_ExecuteCommitmentHashMessagesShouldNotExecuteWhenBlockIsNotFinis []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtCommitmentHash), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -1394,7 +1370,6 @@ func TestWorker_ExecuteBitmapMessagesShouldNotExecuteWhenBlockIsNotFinished(t *t []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtBitmap), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -1419,7 +1394,6 @@ func TestWorker_ExecuteCommitmentMessagesShouldNotExecuteWhenBitmapIsNotFinished []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtCommitment), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -1444,7 +1418,6 @@ func TestWorker_ExecuteSignatureMessagesShouldNotExecuteWhenBitmapIsNotFinished( []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtSignature), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -1469,7 +1442,6 @@ func TestWorker_ExecuteMessagesShouldExecute(t *testing.T) { []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtBlockBody), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) @@ -1504,7 +1476,6 @@ func TestWorker_CheckChannelsShouldWork(t *testing.T) { []byte(cnsGroup[0]), []byte("sig"), int(bn.MtBlockHeader), - uint64(wrk.Rounder().TimeStamp().Unix()), 1, chainID, ) @@ -1629,7 +1600,6 @@ func 
TestWorker_ExecuteStoredMessagesShouldWork(t *testing.T) { []byte(wrk.ConsensusState().ConsensusGroup()[0]), []byte("sig"), int(bn.MtBlockBody), - uint64(wrk.Rounder().TimeStamp().Unix()), 0, chainID, ) diff --git a/core/address.go b/core/address.go index 5d68d61e725..e69e0eaaab4 100644 --- a/core/address.go +++ b/core/address.go @@ -17,6 +17,16 @@ const ShardIdentiferLen = 2 const metaChainShardIdentifier uint8 = 255 const numInitCharactersForOnMetachainSC = 5 +// IsEmptyAddress verifies if a set address is of empty - so it is deployment address +func IsEmptyAddress(rcvAddress []byte) bool { + if len(rcvAddress) <= NumInitCharactersForScAddress { + return false + } + + isEmptyAddress := bytes.Equal(rcvAddress, make([]byte, len(rcvAddress))) + return isEmptyAddress +} + // IsSmartContractAddress verifies if a set address is of type smart contract func IsSmartContractAddress(rcvAddress []byte) bool { if len(rcvAddress) <= NumInitCharactersForScAddress { diff --git a/core/constants.go b/core/constants.go index 5728feef3e1..e5f9053299e 100644 --- a/core/constants.go +++ b/core/constants.go @@ -16,6 +16,9 @@ const NodeTypeValidator NodeType = "validator" // pkPrefixSize specifies the max numbers of chars to be displayed from one publc key const pkPrefixSize = 12 +// FileModeUserReadWrite represents the permission for a file which allows the user for reading and writing +const FileModeUserReadWrite = 0600 + // MaxBulkTransactionSize specifies the maximum size of one bulk with txs which can be send over the network //TODO convert this const into a var and read it from config when this code moves to another binary const MaxBulkTransactionSize = 2 << 17 //128KB bulks @@ -23,8 +26,14 @@ const MaxBulkTransactionSize = 2 << 17 //128KB bulks // ConsensusTopic is the topic used in consensus algorithm const ConsensusTopic = "consensus" -// GenesisBlockNonce is the nonce of the genesis block -const GenesisBlockNonce = 0 +// PathShardPlaceholder represents the placeholder for the shard ID in paths +const PathShardPlaceholder = "[S]" + +// PathEpochPlaceholder represents the placeholder for the epoch number in paths +const PathEpochPlaceholder = "[E]" + +// PathIdentifierPlaceholder represents the placeholder for the identifier in paths +const PathIdentifierPlaceholder = "[I]" // MetricCurrentRound is the metric for monitoring the current round of a node const MetricCurrentRound = "erd_current_round" @@ -111,6 +120,9 @@ const MetricNetworkSentBpsPeak = "erd_network_sent_bps_peak" // MetricRoundTime is the metric for round time in seconds const MetricRoundTime = "erd_round_time" +// MetricEpochNumber is the metric for the number of epoch +const MetricEpochNumber = "erd_epoch_number" + // MetricAppVersion is the metric for the current app version const MetricAppVersion = "erd_app_version" diff --git a/core/converters.go b/core/converters.go index 63254d9d38d..79b26a68a34 100644 --- a/core/converters.go +++ b/core/converters.go @@ -6,6 +6,7 @@ import ( "fmt" "math" "strconv" + "strings" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" @@ -96,3 +97,36 @@ func SecondsToHourMinSec(input int) string { return result } + +// GetShardIdString will return the string representation of the shard id +func GetShardIdString(shardId uint32) string { + if shardId == math.MaxUint32 { + return "metachain" + } + + return fmt.Sprintf("%d", shardId) +} + +// EpochStartIdentifier returns the string for the epoch start identifier +func EpochStartIdentifier(epoch uint32) string { + return 
fmt.Sprintf("epochStartBlock_%d", epoch) +} + +// IsUnknownEpochIdentifier return if the epoch identifier represents unknown epoch +func IsUnknownEpochIdentifier(identifier []byte) (bool, error) { + splitString := strings.Split(string(identifier), "_") + if len(splitString) == 0 || len(splitString[0]) == 0 { + return false, ErrInvalidIdentifierForEpochStartBlockRequest + } + + epoch, err := strconv.ParseUint(splitString[1], 10, 32) + if err != nil { + return false, ErrInvalidIdentifierForEpochStartBlockRequest + } + + if epoch == math.MaxUint32 { + return true, nil + } + + return false, nil +} diff --git a/core/errors.go b/core/errors.go index 808cd09988e..fc9f759fcbd 100644 --- a/core/errors.go +++ b/core/errors.go @@ -13,9 +13,6 @@ var ErrNilHasher = errors.New("nil hasher provided") // ErrNilCoordinator signals that a nil shardCoordinator has been provided var ErrNilCoordinator = errors.New("nil coordinator provided") -// ErrNilLogger signals that a nil logger has been provided -var ErrNilLogger = errors.New("nil logger provided") - // ErrInvalidValue signals that a nil value has been provided var ErrInvalidValue = errors.New("invalid value provided") @@ -45,3 +42,6 @@ var ErrNotPositiveValue = errors.New("the provided value is not positive") // ErrNilAppStatusHandler signals that a nil status handler has been provided var ErrNilAppStatusHandler = errors.New("appStatusHandler is nil") + +// ErrInvalidIdentifierForEpochStartBlockRequest +var ErrInvalidIdentifierForEpochStartBlockRequest = errors.New("invalid identifier for epoch start block request") diff --git a/core/export_test.go b/core/export_test.go new file mode 100644 index 00000000000..e616ced5d0d --- /dev/null +++ b/core/export_test.go @@ -0,0 +1,7 @@ +package core + +import "time" + +func (sw *stopWatch) GetContainingDuration() (map[string]time.Duration, []string) { + return sw.getContainingDuration() +} diff --git a/core/file.go b/core/file.go index b3095d22c05..7b0943d8119 100644 --- a/core/file.go +++ b/core/file.go @@ -18,7 +18,7 @@ func OpenFile(relativePath string) (*os.File, error) { log.Warn("cannot create absolute path for the provided file", "error", err.Error()) return nil, err } - f, err := os.Open(path) + f, err := os.Open(filepath.Clean(path)) if err != nil { return nil, err } @@ -120,7 +120,7 @@ func CreateFile(prefix string, subfolder string, fileExtension string) (*os.File return os.OpenFile( filepath.Join(absPath, fileName+"."+fileExtension), os.O_CREATE|os.O_APPEND|os.O_WRONLY, - 0666) + FileModeUserReadWrite) } // LoadSkFromPemFile loads the secret key bytes stored in the file diff --git a/core/indexer/data.go b/core/indexer/data.go index 4041e166202..f7416477894 100644 --- a/core/indexer/data.go +++ b/core/indexer/data.go @@ -21,7 +21,7 @@ type Transaction struct { SenderShard uint32 `json:"senderShard"` GasPrice uint64 `json:"gasPrice"` GasLimit uint64 `json:"gasLimit"` - Data string `json:"data"` + Data []byte `json:"data"` Signature string `json:"signature"` Timestamp time.Duration `json:"timestamp"` Status string `json:"status"` diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index 644348b125c..f33ecfbfccd 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -16,6 +16,7 @@ import ( "github.com/ElrondNetwork/elrond-go/core/statistics" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/receipt" "github.com/ElrondNetwork/elrond-go/data/rewardTx" 
"github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/transaction" @@ -255,7 +256,11 @@ func (ei *elasticIndexer) SaveRoundInfo(roundInfo RoundInfo) { } buff.Grow(len(marshalizedRoundInfo)) - buff.Write(marshalizedRoundInfo) + _, err = buff.Write(marshalizedRoundInfo) + if err != nil { + log.Warn("elastic search: save round info, write", "error", err.Error()) + return + } req := esapi.IndexRequest{ Index: roundIndex, @@ -306,7 +311,10 @@ func (ei *elasticIndexer) saveShardValidatorsPubKeys(shardId uint32, shardValida } buff.Grow(len(marshalizedValidatorPubKeys)) - buff.Write(marshalizedValidatorPubKeys) + _, err = buff.Write(marshalizedValidatorPubKeys) + if err != nil { + log.Warn("elastic search: save shard validators pub keys, write", "error", err.Error()) + } req := esapi.IndexRequest{ Index: validatorsIndex, @@ -367,7 +375,10 @@ func (ei *elasticIndexer) saveHeader(header data.HeaderHandler, signersIndexes [ serializedBlock, headerHash := ei.getSerializedElasticBlockAndHeaderHash(header, signersIndexes) buff.Grow(len(serializedBlock)) - buff.Write(serializedBlock) + _, err := buff.Write(serializedBlock) + if err != nil { + log.Warn("elastic search: save header, write", "error", err.Error()) + } req := esapi.IndexRequest{ Index: blockIndex, @@ -404,8 +415,14 @@ func (ei *elasticIndexer) serializeBulkTx(bulk []*Transaction) bytes.Buffer { serializedTx = append(serializedTx, "\n"...) buff.Grow(len(meta) + len(serializedTx)) - buff.Write(meta) - buff.Write(serializedTx) + _, err = buff.Write(meta) + if err != nil { + log.Warn("elastic search: serialize bulk tx, write meta", "error", err.Error()) + } + _, err = buff.Write(serializedTx) + if err != nil { + log.Warn("elastic search: serialize bulk tx, write serialized tx", "error", err.Error()) + } } return buff @@ -540,8 +557,14 @@ func (ei *elasticIndexer) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { serializedInfo = append(serializedInfo, "\n"...) 
buff.Grow(len(meta) + len(serializedInfo)) - buff.Write(meta) - buff.Write(serializedInfo) + _, err = buff.Write(meta) + if err != nil { + log.Warn("elastic search: update TPS write meta", "error", err.Error()) + } + _, err = buff.Write(serializedInfo) + if err != nil { + log.Warn("elastic search: update TPS write serialized info", "error", err.Error()) + } for _, shardInfo := range tpsBenchmark.ShardStatistics() { serializedInfo, meta := ei.serializeShardInfo(shardInfo) @@ -550,8 +573,14 @@ func (ei *elasticIndexer) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { } buff.Grow(len(meta) + len(serializedInfo)) - buff.Write(meta) - buff.Write(serializedInfo) + _, err = buff.Write(meta) + if err != nil { + log.Warn("elastic search: update TPS write meta", "error", err.Error()) + } + _, err = buff.Write(serializedInfo) + if err != nil { + log.Warn("elastic search: update TPS write serialized data", "error", err.Error()) + } res, err := ei.db.Bulk(bytes.NewReader(buff.Bytes()), ei.db.Bulk.WithIndex(tpsIndex)) if err != nil { @@ -618,6 +647,11 @@ func getTransactionByType( return buildRewardTransaction(currentReward, txHash, mbHash, blockHash, mb, header) } + currentReceipt, ok := tx.(*receipt.Receipt) + if ok && currentReceipt != nil { + return buildReceiptTransaction(currentReceipt, txHash, mbHash, blockHash, mb, header) + } + return nil } @@ -702,7 +736,35 @@ func buildRewardTransaction( SenderShard: mb.SenderShardID, GasPrice: 0, GasLimit: 0, - Data: "", + Data: []byte(""), + Signature: "", + Timestamp: time.Duration(header.GetTimeStamp()), + Status: "Success", + } +} + +func buildReceiptTransaction( + rpt *receipt.Receipt, + txHash []byte, + mbHash []byte, + blockHash []byte, + mb *block.MiniBlock, + header data.HeaderHandler, +) *Transaction { + return &Transaction{ + Hash: hex.EncodeToString(txHash), + MBHash: hex.EncodeToString(mbHash), + BlockHash: hex.EncodeToString(blockHash), + Nonce: rpt.GetNonce(), + Round: header.GetRound(), + Value: rpt.Value.String(), + Receiver: hex.EncodeToString(rpt.GetRecvAddress()), + Sender: hex.EncodeToString(rpt.GetSndAddress()), + ReceiverShard: mb.ReceiverShardID, + SenderShard: mb.SenderShardID, + GasPrice: 0, + GasLimit: 0, + Data: rpt.Data, Signature: "", Timestamp: time.Duration(header.GetTimeStamp()), Status: "Success", diff --git a/core/indexer/elasticsearch_test.go b/core/indexer/elasticsearch_test.go index 95c2bce8463..60a026b87f2 100644 --- a/core/indexer/elasticsearch_test.go +++ b/core/indexer/elasticsearch_test.go @@ -94,9 +94,8 @@ func newTestTxPool() map[string]data.TransactionHandler { SndAddr: []byte("sender_address1"), GasPrice: uint64(10000), GasLimit: uint64(1000), - Data: "tx_data1", + Data: []byte("tx_data1"), Signature: []byte("signature1"), - Challenge: []byte("challange1"), } txPool["tx2"] = &transaction.Transaction{ @@ -106,9 +105,8 @@ func newTestTxPool() map[string]data.TransactionHandler { SndAddr: []byte("sender_address2"), GasPrice: uint64(10000), GasLimit: uint64(1000), - Data: "tx_data2", + Data: []byte("tx_data2"), Signature: []byte("signature2"), - Challenge: []byte("challange2"), } txPool["tx3"] = &transaction.Transaction{ @@ -118,9 +116,8 @@ func newTestTxPool() map[string]data.TransactionHandler { SndAddr: []byte("sender_address3"), GasPrice: uint64(10000), GasLimit: uint64(1000), - Data: "tx_data3", + Data: []byte("tx_data3"), Signature: []byte("signature3"), - Challenge: []byte("challange3"), } return txPool diff --git a/core/interface.go b/core/interface.go index 5999f9a6d69..09431958498 100644 --- 
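// A compact sketch of the dispatch now performed when indexing transaction handlers: the
// concrete type decides which build function shapes the indexed document, with receipts
// handled as well. Only the reward and receipt branches are taken from the code above; the
// helper name and the default label are assumptions for illustration, and the snippet assumes
// the imports already present in elasticsearch.go.
func describeTxHandlerSketch(tx data.TransactionHandler) string {
	switch tx.(type) {
	case *rewardTx.RewardTx:
		return "reward transaction"
	case *receipt.Receipt:
		return "receipt"
	default:
		return "other transaction handler"
	}
}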
a/core/interface.go +++ b/core/interface.go @@ -1,6 +1,6 @@ package core -// AppStatusHandler interface will handle different implementations of monitoring tools, such as Prometheus of term-ui +// AppStatusHandler interface will handle different implementations of monitoring tools, such as term-ui or status metrics type AppStatusHandler interface { IsInterfaceNil() bool Increment(key string) diff --git a/core/mock/hasherMock.go b/core/mock/hasherMock.go index f896cacd0dd..17b88ebcbaa 100644 --- a/core/mock/hasherMock.go +++ b/core/mock/hasherMock.go @@ -11,7 +11,7 @@ type HasherMock struct { // Compute will output the SHA's equivalent of the input string func (sha HasherMock) Compute(s string) []byte { h := sha256.New() - h.Write([]byte(s)) + _, _ = h.Write([]byte(s)) return h.Sum(nil) } @@ -30,8 +30,5 @@ func (HasherMock) Size() int { // IsInterfaceNil returns true if there is no value under the interface func (sha HasherMock) IsInterfaceNil() bool { - if &sha == nil { - return true - } return false } diff --git a/core/statistics/resourceMonitor.go b/core/statistics/resourceMonitor.go index 3a50796baca..9876adccece 100644 --- a/core/statistics/resourceMonitor.go +++ b/core/statistics/resourceMonitor.go @@ -2,13 +2,18 @@ package statistics import ( "fmt" + "io/ioutil" "os" + "path" + "path/filepath" "runtime" "sync" "time" + "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/statistics/machine" + "github.com/ElrondNetwork/elrond-go/storage" ) // ResourceMonitor outputs statistics about resources used by the binary @@ -31,7 +36,7 @@ func NewResourceMonitor(file *os.File) (*ResourceMonitor, error) { } // GenerateStatistics creates a new statistic string -func (rm *ResourceMonitor) GenerateStatistics() string { +func (rm *ResourceMonitor) GenerateStatistics(generalConfig *config.Config, pathManager storage.PathManagerHandler, shardId string) string { var memStats runtime.MemStats runtime.ReadMemStats(&memStats) @@ -51,11 +56,17 @@ func (rm *ResourceMonitor) GenerateStatistics() string { } } + trieStoragePath, mainDb := path.Split(pathManager.PathForStatic(shardId, generalConfig.AccountsTrieStorage.DB.FilePath)) + + trieDbFilePath := filepath.Join(trieStoragePath, mainDb) + evictionWaitingListDbFilePath := filepath.Join(trieStoragePath, generalConfig.EvictionWaitingList.DB.FilePath) + snapshotsDbFilePath := filepath.Join(trieStoragePath, generalConfig.TrieSnapshotDB.FilePath) + return fmt.Sprintf("timestamp: %d, uptime: %v, num go: %d, alloc: %s, heap alloc: %s, heap idle: %s"+ ", heap inuse: %s, heap sys: %s, heap released: %s, heap num objs: %d, sys mem: %s, "+ - "total mem: %s, num GC: %d, FDs: %d, num opened files: %d, num conns: %d\n", + "total mem: %s, num GC: %d, FDs: %d, num opened files: %d, num conns: %d, accountsTrieDbMem: %s, evictionDbMem: %s, snapshotsDbMem: %s\n", time.Now().Unix(), - time.Duration(time.Now().UnixNano() - rm.startTime.UnixNano()).Round(time.Second), + time.Duration(time.Now().UnixNano()-rm.startTime.UnixNano()).Round(time.Second), runtime.NumGoroutine(), core.ConvertBytes(memStats.Alloc), core.ConvertBytes(memStats.HeapAlloc), @@ -70,18 +81,32 @@ func (rm *ResourceMonitor) GenerateStatistics() string { fileDescriptors, numOpenFiles, numConns, + getDirMemSize(trieDbFilePath), + getDirMemSize(evictionWaitingListDbFilePath), + getDirMemSize(snapshotsDbFilePath), ) } +func getDirMemSize(dir string) string { + files, _ := ioutil.ReadDir(dir) + + size := int64(0) + for _, f := range files { + 
size += f.Size() + } + + return core.ConvertBytes(uint64(size)) +} + // SaveStatistics generates and saves statistic data on the disk -func (rm *ResourceMonitor) SaveStatistics() error { +func (rm *ResourceMonitor) SaveStatistics(generalConfig *config.Config, pathManager storage.PathManagerHandler, shardId string) error { rm.mutFile.RLock() defer rm.mutFile.RUnlock() if rm.file == nil { return ErrNilFileToWriteStats } - stats := rm.GenerateStatistics() + stats := rm.GenerateStatistics(generalConfig, pathManager, shardId) _, err := rm.file.WriteString(stats) if err != nil { return err diff --git a/core/statistics/resourceMonitor_test.go b/core/statistics/resourceMonitor_test.go index d517117d6d8..e4037ed884e 100644 --- a/core/statistics/resourceMonitor_test.go +++ b/core/statistics/resourceMonitor_test.go @@ -4,7 +4,9 @@ import ( "os" "testing" + "github.com/ElrondNetwork/elrond-go/config" stats "github.com/ElrondNetwork/elrond-go/core/statistics" + "github.com/ElrondNetwork/elrond-go/storage/mock" "github.com/stretchr/testify/assert" ) @@ -32,7 +34,7 @@ func TestResourceMonitor_GenerateStatisticsShouldPass(t *testing.T) { resourceMonitor, err := stats.NewResourceMonitor(&os.File{}) assert.Nil(t, err) - statistics := resourceMonitor.GenerateStatistics() + statistics := resourceMonitor.GenerateStatistics(&config.Config{AccountsTrieStorage: config.StorageConfig{DB: config.DBConfig{}}}, &mock.PathManagerStub{}, "") assert.Nil(t, err) assert.NotNil(t, statistics) @@ -46,7 +48,7 @@ func TestResourceMonitor_SaveStatisticsShouldPass(t *testing.T) { resourceMonitor, _ := stats.NewResourceMonitor(file) - err = resourceMonitor.SaveStatistics() + err = resourceMonitor.SaveStatistics(&config.Config{AccountsTrieStorage: config.StorageConfig{DB: config.DBConfig{}}}, &mock.PathManagerStub{}, "") if _, errF := os.Stat("test1"); errF == nil { _ = os.Remove("test1") } @@ -66,7 +68,7 @@ func TestResourceMonitor_SaveStatisticsCloseFileBeforeSaveShouldErr(t *testing.T err = resourceMonitor.Close() assert.Nil(t, err) - err = resourceMonitor.SaveStatistics() + err = resourceMonitor.SaveStatistics(&config.Config{AccountsTrieStorage: config.StorageConfig{DB: config.DBConfig{}}}, &mock.PathManagerStub{}, "") if _, errF := os.Stat("test2"); errF == nil { _ = os.Remove("test2") } diff --git a/core/statistics/softwareVersion/softwareVersion.go b/core/statistics/softwareVersion/softwareVersion.go index 31db62fd172..7bc1338e3ee 100644 --- a/core/statistics/softwareVersion/softwareVersion.go +++ b/core/statistics/softwareVersion/softwareVersion.go @@ -2,8 +2,9 @@ package softwareVersion import ( "bytes" + "crypto/rand" "encoding/json" - "math/rand" + "math/big" "net/http" "time" @@ -18,6 +19,7 @@ type tagVersion struct { TagVersion string `json:"tag_name"` } +// SoftwareVersionChecker is a component which is used to check if a new software stable tag is available type SoftwareVersionChecker struct { statusHandler core.AppStatusHandler mostRecentSoftwareVersion string @@ -32,8 +34,14 @@ func NewSoftwareVersionChecker(appStatusHandler core.AppStatusHandler) (*Softwar return nil, core.ErrNilAppStatusHandler } - // check interval will be random in a interval [1hour, 1hour 15minutes] - randInterval := time.Duration(rand.Int() % 15) + // check interval will be a random duration in the interval [1hour5minutes , 1hour20minutes] + randBigInt, err := rand.Int(rand.Reader, big.NewInt(15)) + if err != nil { + return nil, err + } + + randInt := randBigInt.Int64() + randInterval := time.Duration(randInt) checkRandInterval := 
checkInterval + randInterval*time.Minute return &SoftwareVersionChecker{ @@ -57,7 +65,7 @@ func (svc *SoftwareVersionChecker) StartCheckSoftwareVersion() { } func (svc *SoftwareVersionChecker) readLatestStableVersion() { - tagVersion, err := readJSONFromUrl(stableTagLocation) + tagVersion, err := readJSONFromUrl() if err != nil { log.Debug("cannot read json with latest stable tag", err) return @@ -69,8 +77,8 @@ func (svc *SoftwareVersionChecker) readLatestStableVersion() { svc.statusHandler.SetStringValue(core.MetricLatestTagSoftwareVersion, svc.mostRecentSoftwareVersion) } -func readJSONFromUrl(url string) (string, error) { - resp, err := http.Get(url) +func readJSONFromUrl() (string, error) { + resp, err := http.Get(stableTagLocation) if err != nil { return "", err } diff --git a/core/statistics/softwareVersion/softwareVersion_test.go b/core/statistics/softwareVersion/softwareVersion_test.go index 5b90d63f74b..49dcd84160a 100644 --- a/core/statistics/softwareVersion/softwareVersion_test.go +++ b/core/statistics/softwareVersion/softwareVersion_test.go @@ -26,12 +26,3 @@ func TestNewSoftwareVersionChecker(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, softwareChecker) } - -func TestCheckSoftwareVersion_Read(t *testing.T) { - t.Parallel() - - tag, err := readJSONFromUrl(stableTagLocation) - - assert.Nil(t, err) - assert.NotNil(t, tag) -} diff --git a/core/stopWatch.go b/core/stopWatch.go new file mode 100644 index 00000000000..15545ffc870 --- /dev/null +++ b/core/stopWatch.go @@ -0,0 +1,112 @@ +package core + +import ( + "fmt" + "sync" + "time" +) + +// MeasurementsLoggerFormat contains the formatting string to output elapsed time in seconds in a consistent way +const MeasurementsLoggerFormat = "%.4fs" + +type stopWatch struct { + mut sync.Mutex + identifiers []string + started map[string]time.Time + elapsed map[string]time.Duration +} + +// NewStopWatch returns a new stopWatch instance used to measure duration between finished and started events +func NewStopWatch() *stopWatch { + return &stopWatch{ + identifiers: make([]string, 0), + started: make(map[string]time.Time), + elapsed: make(map[string]time.Duration), + } +} + +// Start marks a start event for a provided identifier +func (sw *stopWatch) Start(identifier string) { + sw.mut.Lock() + _, hasStarted := sw.started[identifier] + _, hasElapsed := sw.elapsed[identifier] + if !hasStarted && !hasElapsed { + sw.identifiers = append(sw.identifiers, identifier) + } + + sw.started[identifier] = time.Now() + sw.mut.Unlock() +} + +func (sw *stopWatch) addIdentifier(identifier string) { + _, hasStarted := sw.started[identifier] + if hasStarted { + return + } + + _, hasElapsed := sw.elapsed[identifier] + if hasElapsed { + return + } + + sw.identifiers = append(sw.identifiers, identifier) +} + +// Stop marks a finish event for a provided identifier +func (sw *stopWatch) Stop(identifier string) { + sw.mut.Lock() + defer sw.mut.Unlock() + + timeStarted, ok := sw.started[identifier] + if !ok { + return + } + + sw.elapsed[identifier] += time.Since(timeStarted) + delete(sw.started, identifier) +} + +// GetMeasurements returns a logger compatible slice of interface{} containing pairs of (identifier, duration) +func (sw *stopWatch) GetMeasurements() []interface{} { + data, newIdentifiers := sw.getContainingDuration() + + output := make([]interface{}, 0) + for _, identifier := range newIdentifiers { + duration := data[identifier] + output = append(output, identifier) + output = append(output, fmt.Sprintf(MeasurementsLoggerFormat, duration.Seconds())) 
+ } + + return output +} + +// getContainingDuration returns the containing map of (identifier, duration) pairs and the identifiers +func (sw *stopWatch) getContainingDuration() (map[string]time.Duration, []string) { + sw.mut.Lock() + + output := make(map[string]time.Duration) + newIdentifiers := make([]string, 0) + for _, identifier := range sw.identifiers { + duration, ok := sw.elapsed[identifier] + if !ok { + continue + } + + output[identifier] = duration + newIdentifiers = append(newIdentifiers, identifier) + } + sw.mut.Unlock() + + return output, newIdentifiers +} + +// Add adds a time measure containing duration list to self +func (sw *stopWatch) Add(src *stopWatch) { + sw.mut.Lock() + data, _ := src.getContainingDuration() + for identifier, duration := range data { + sw.addIdentifier(identifier) + sw.elapsed[identifier] += duration + } + sw.mut.Unlock() +} diff --git a/core/stopWatch_test.go b/core/stopWatch_test.go new file mode 100644 index 00000000000..2759efc5357 --- /dev/null +++ b/core/stopWatch_test.go @@ -0,0 +1,123 @@ +package core + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestStopWatch_Start(t *testing.T) { + t.Parallel() + + sw := NewStopWatch() + identifier := "identifier" + + sw.Start(identifier) + + _, has := sw.started[identifier] + + assert.True(t, has) + assert.Equal(t, identifier, sw.identifiers[0]) +} + +func TestStopWatch_DoubleStartShouldNotReAddInIdentifiers(t *testing.T) { + t.Parallel() + + sw := NewStopWatch() + identifier1 := "identifier1" + identifier2 := "identifier2" + + sw.Start(identifier1) + sw.Start(identifier2) + sw.Start(identifier1) + + assert.Equal(t, identifier1, sw.identifiers[0]) + assert.Equal(t, identifier2, sw.identifiers[1]) + assert.Equal(t, 2, len(sw.identifiers)) +} + +func TestStopWatch_StopNoStartShouldNotAddDuration(t *testing.T) { + t.Parallel() + + sw := NewStopWatch() + identifier := "identifier" + + sw.Stop(identifier) + + _, has := sw.elapsed[identifier] + + assert.False(t, has) +} + +func TestStopWatch_StopWithStartShouldAddDuration(t *testing.T) { + t.Parallel() + + sw := NewStopWatch() + identifier := "identifier" + + sw.Start(identifier) + sw.Stop(identifier) + + _, has := sw.elapsed[identifier] + + assert.True(t, has) +} + +func TestStopWatch_GetMeasurementsNotFinishedShouldOmit(t *testing.T) { + t.Parallel() + + sw := NewStopWatch() + identifier := "identifier" + + sw.Start(identifier) + + measurements := sw.GetMeasurements() + log.Info("measurements", measurements...) + + assert.Equal(t, 0, len(measurements)) +} + +func TestStopWatch_GetMeasurementsShouldWork(t *testing.T) { + t.Parallel() + + sw := NewStopWatch() + identifier := "identifier" + + sw.Start(identifier) + sw.Stop(identifier) + + measurements := sw.GetMeasurements() + log.Info("measurements", measurements...) 
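+	// a single started and stopped identifier yields exactly two entries: the identifier followed by its formatted duration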
+ + assert.Equal(t, 2, len(measurements)) + assert.Equal(t, identifier, measurements[0]) +} + +func TestStopWatch_AddShouldWork(t *testing.T) { + t.Parallel() + + identifier1 := "identifier1" + duration1 := time.Duration(5) + identifier2 := "identifier2" + duration2 := time.Duration(7) + + swSrc := NewStopWatch() + swSrc.identifiers = []string{identifier1, identifier2} + swSrc.elapsed[identifier1] = duration1 + swSrc.elapsed[identifier2] = duration2 + + sw := NewStopWatch() + + sw.Add(swSrc) + + data, _ := sw.GetContainingDuration() + assert.Equal(t, duration1, data[identifier1]) + assert.Equal(t, duration2, data[identifier2]) + + sw.Add(swSrc) + + data, _ = sw.GetContainingDuration() + assert.Equal(t, duration1*2, data[identifier1]) + assert.Equal(t, duration2*2, data[identifier2]) +} diff --git a/crypto/mock/hasherMock.go b/crypto/mock/hasherMock.go index 383b8f49977..17b88ebcbaa 100644 --- a/crypto/mock/hasherMock.go +++ b/crypto/mock/hasherMock.go @@ -11,7 +11,7 @@ type HasherMock struct { // Compute will output the SHA's equivalent of the input string func (sha HasherMock) Compute(s string) []byte { h := sha256.New() - h.Write([]byte(s)) + _, _ = h.Write([]byte(s)) return h.Sum(nil) } @@ -29,9 +29,6 @@ func (HasherMock) Size() int { } // IsInterfaceNil returns true if there is no value under the interface -func (sha *HasherMock) IsInterfaceNil() bool { - if sha == nil { - return true - } +func (sha HasherMock) IsInterfaceNil() bool { return false } diff --git a/crypto/mock/hasherSpongeMock.go b/crypto/mock/hasherSpongeMock.go index 5104acc2a3d..0f0876ffd66 100644 --- a/crypto/mock/hasherSpongeMock.go +++ b/crypto/mock/hasherSpongeMock.go @@ -15,7 +15,7 @@ type HasherSpongeMock struct { // Compute will output the SHA's equivalent of the input string func (sha HasherSpongeMock) Compute(s string) []byte { h, _ := blake2b.New(hashSize, nil) - h.Write([]byte(s)) + _, _ = h.Write([]byte(s)) return h.Sum(nil) } @@ -33,9 +33,6 @@ func (HasherSpongeMock) Size() int { } // IsInterfaceNil returns true if there is no value under the interface -func (sha *HasherSpongeMock) IsInterfaceNil() bool { - if sha == nil { - return true - } +func (sha HasherSpongeMock) IsInterfaceNil() bool { return false } diff --git a/data/block/block.go b/data/block/block.go index 37faad61983..3d44eb9ec9f 100644 --- a/data/block/block.go +++ b/data/block/block.go @@ -38,8 +38,10 @@ const ( SmartContractResultBlock Type = 3 // RewardsBlock identifies a miniblock holding accumulated rewards, both system generated and from tx fees RewardsBlock Type = 4 - // InvalidBlock identifies identifies an invalid miniblock + // InvalidBlock identifies a miniblock holding invalid transactions InvalidBlock Type = 5 + // ReceiptBlock identifies a miniblock holding receipts + ReceiptBlock Type = 6 ) // String returns the string representation of the Type @@ -57,6 +59,8 @@ func (bType Type) String() string { return "RewardsBody" case InvalidBlock: return "InvalidBlock" + case ReceiptBlock: + return "ReceiptBlock" default: return fmt.Sprintf("Unknown(%d)", bType) } @@ -106,8 +110,10 @@ type Header struct { RootHash []byte `capid:"14"` ValidatorStatsRootHash []byte `capid:"15"` MetaBlockHashes [][]byte `capid:"16"` - TxCount uint32 `capid:"17"` - ChainID []byte `capid:"18"` + EpochStartMetaHash []byte `capid:"17"` + TxCount uint32 `capid:"18"` + ReceiptsHash []byte `capid:"19"` + ChainID []byte `capid:"20"` } // Save saves the serialized data of a Block Header into a stream through Capnp protocol @@ -147,6 +153,7 @@ func HeaderCapnToGo(src 
capnp.HeaderCapn, dest *Header) *Header { dest.BlockBodyType = Type(src.BlockBodyType()) dest.Signature = src.Signature() dest.LeaderSignature = src.LeaderSignature() + dest.EpochStartMetaHash = src.EpochStartMetaHash() dest.ChainID = src.Chainid() mbLength := src.MiniBlockHeaders().Len() @@ -193,6 +200,8 @@ func HeaderGoToCapn(seg *capn.Segment, src *Header) capnp.HeaderCapn { dest.SetSignature(src.Signature) dest.SetLeaderSignature(src.LeaderSignature) dest.SetChainid(src.ChainID) + dest.SetEpochStartMetaHash(src.EpochStartMetaHash) + if len(src.MiniBlockHeaders) > 0 { miniBlockList := capnp.NewMiniBlockHeaderCapnList(seg, len(src.MiniBlockHeaders)) pList := capn.PointerList(miniBlockList) @@ -444,6 +453,16 @@ func (h *Header) GetTxCount() uint32 { return h.TxCount } +// SetShardID sets header shard ID +func (h *Header) SetShardID(shId uint32) { + h.ShardId = shId +} + +// GetReceiptsHash returns the hash of the receipts and intra-shard smart contract results +func (h *Header) GetReceiptsHash() []byte { + return h.ReceiptsHash +} + // SetNonce sets header nonce func (h *Header) SetNonce(n uint64) { h.Nonce = n @@ -557,10 +576,7 @@ func (b Body) IntegrityAndValidity() error { // IsInterfaceNil returns true if there is no value under the interface func (b Body) IsInterfaceNil() bool { - if b == nil { - return true - } - return false + return b == nil } // IsInterfaceNil returns true if there is no value under the interface @@ -571,6 +587,11 @@ func (h *Header) IsInterfaceNil() bool { return false } +// IsStartOfEpochBlock verifies if the block is of type start of epoch +func (h *Header) IsStartOfEpochBlock() bool { + return len(h.EpochStartMetaHash) > 0 +} + // ItemsInHeader gets the number of items(hashes) added in block header func (h *Header) ItemsInHeader() uint32 { itemsInHeader := len(h.MiniBlockHeaders) + len(h.PeerChanges) + len(h.MetaBlockHashes) @@ -596,3 +617,16 @@ func (h *Header) CheckChainID(reference []byte) error { return nil } + +// Clone the underlying data +func (mb *MiniBlock) Clone() *MiniBlock { + newMb := &MiniBlock{ + ReceiverShardID: mb.ReceiverShardID, + SenderShardID: mb.SenderShardID, + Type: mb.Type, + } + newMb.TxHashes = make([][]byte, len(mb.TxHashes)) + copy(newMb.TxHashes, mb.TxHashes) + + return newMb +} diff --git a/data/block/block_test.go b/data/block/block_test.go index bc63642fe35..613af9119f1 100644 --- a/data/block/block_test.go +++ b/data/block/block_test.go @@ -3,6 +3,7 @@ package block_test import ( "bytes" "errors" + "reflect" "testing" "github.com/ElrondNetwork/elrond-go/data" @@ -26,24 +27,25 @@ func TestHeader_SaveLoad(t *testing.T) { } h := block.Header{ - Nonce: uint64(1), - PrevHash: []byte("previous hash"), - PrevRandSeed: []byte("prev random seed"), - RandSeed: []byte("current random seed"), - PubKeysBitmap: []byte("pub key bitmap"), - ShardId: uint32(10), - TimeStamp: uint64(1234), - Round: uint64(1), - Epoch: uint32(1), - BlockBodyType: block.TxBlock, - Signature: []byte("signature"), - MiniBlockHeaders: []block.MiniBlockHeader{mb}, - PeerChanges: []block.PeerChange{pc}, - RootHash: []byte("root hash"), - MetaBlockHashes: make([][]byte, 0), - TxCount: uint32(10), - LeaderSignature: []byte("leader_sig"), - ChainID: []byte("chain ID"), + Nonce: uint64(1), + PrevHash: []byte("previous hash"), + PrevRandSeed: []byte("prev random seed"), + RandSeed: []byte("current random seed"), + PubKeysBitmap: []byte("pub key bitmap"), + ShardId: uint32(10), + TimeStamp: uint64(1234), + Round: uint64(1), + Epoch: uint32(1), + BlockBodyType: 
block.TxBlock, + Signature: []byte("signature"), + MiniBlockHeaders: []block.MiniBlockHeader{mb}, + PeerChanges: []block.PeerChange{pc}, + RootHash: []byte("root hash"), + MetaBlockHashes: make([][]byte, 0), + TxCount: uint32(10), + EpochStartMetaHash: []byte("epochStart"), + LeaderSignature: []byte("leader_sig"), + ChainID: []byte("chain ID"), } var b bytes.Buffer @@ -440,3 +442,18 @@ func TestHeader_CheckChainID(t *testing.T) { assert.Nil(t, hdr.CheckChainID(okChainID)) assert.True(t, errors.Is(hdr.CheckChainID(wrongChainID), data.ErrInvalidChainID)) } + +func TestMiniBlock_Clone(t *testing.T) { + t.Parallel() + + miniBlock := &block.MiniBlock{ + TxHashes: [][]byte{[]byte("something"), []byte("something2")}, + ReceiverShardID: 1, + SenderShardID: 2, + Type: 0, + } + + clonedMB := miniBlock.Clone() + + assert.True(t, reflect.DeepEqual(miniBlock, clonedMB)) +} diff --git a/data/block/capnp/schema.capnp b/data/block/capnp/schema.capnp index 23c8df42656..e79d02c6365 100644 --- a/data/block/capnp/schema.capnp +++ b/data/block/capnp/schema.capnp @@ -22,8 +22,9 @@ struct HeaderCapn { rootHash @14: Data; validatorStatsRootHash @15: Data; metaHdrHashes @16: List(Data); - txCount @17: UInt32; - chainid @18: Data; + epochStartMetaHash @17: Data; + txCount @18: UInt32; + chainid @19: Data; } struct MiniBlockHeaderCapn { diff --git a/data/block/capnp/schema.capnp.go b/data/block/capnp/schema.capnp.go index ba09b8dd9fc..b1e72610f25 100644 --- a/data/block/capnp/schema.capnp.go +++ b/data/block/capnp/schema.capnp.go @@ -12,9 +12,9 @@ import ( type HeaderCapn C.Struct -func NewHeaderCapn(s *C.Segment) HeaderCapn { return HeaderCapn(s.NewStruct(40, 12)) } -func NewRootHeaderCapn(s *C.Segment) HeaderCapn { return HeaderCapn(s.NewRootStruct(40, 12)) } -func AutoNewHeaderCapn(s *C.Segment) HeaderCapn { return HeaderCapn(s.NewStructAR(40, 12)) } +func NewHeaderCapn(s *C.Segment) HeaderCapn { return HeaderCapn(s.NewStruct(40, 13)) } +func NewRootHeaderCapn(s *C.Segment) HeaderCapn { return HeaderCapn(s.NewRootStruct(40, 13)) } +func AutoNewHeaderCapn(s *C.Segment) HeaderCapn { return HeaderCapn(s.NewStructAR(40, 13)) } func ReadRootHeaderCapn(s *C.Segment) HeaderCapn { return HeaderCapn(s.Root(0).ToStruct()) } func (s HeaderCapn) Nonce() uint64 { return C.Struct(s).Get64(0) } func (s HeaderCapn) SetNonce(v uint64) { C.Struct(s).Set64(0, v) } @@ -56,12 +56,14 @@ func (s HeaderCapn) ValidatorStatsRootHash() []byte { return C.Struct(s).G func (s HeaderCapn) SetValidatorStatsRootHash(v []byte) { C.Struct(s).SetObject(9, s.Segment.NewData(v)) } -func (s HeaderCapn) MetaHdrHashes() C.DataList { return C.DataList(C.Struct(s).GetObject(10)) } -func (s HeaderCapn) SetMetaHdrHashes(v C.DataList) { C.Struct(s).SetObject(10, C.Object(v)) } -func (s HeaderCapn) TxCount() uint32 { return C.Struct(s).Get32(36) } -func (s HeaderCapn) SetTxCount(v uint32) { C.Struct(s).Set32(36, v) } -func (s HeaderCapn) Chainid() []byte { return C.Struct(s).GetObject(11).ToData() } -func (s HeaderCapn) SetChainid(v []byte) { C.Struct(s).SetObject(11, s.Segment.NewData(v)) } +func (s HeaderCapn) MetaHdrHashes() C.DataList { return C.DataList(C.Struct(s).GetObject(10)) } +func (s HeaderCapn) SetMetaHdrHashes(v C.DataList) { C.Struct(s).SetObject(10, C.Object(v)) } +func (s HeaderCapn) EpochStartMetaHash() []byte { return C.Struct(s).GetObject(11).ToData() } +func (s HeaderCapn) SetEpochStartMetaHash(v []byte) { C.Struct(s).SetObject(11, s.Segment.NewData(v)) } +func (s HeaderCapn) TxCount() uint32 { return C.Struct(s).Get32(36) } +func (s 
HeaderCapn) SetTxCount(v uint32) { C.Struct(s).Set32(36, v) } +func (s HeaderCapn) Chainid() []byte { return C.Struct(s).GetObject(12).ToData() } +func (s HeaderCapn) SetChainid(v []byte) { C.Struct(s).SetObject(12, s.Segment.NewData(v)) } func (s HeaderCapn) WriteJSON(w io.Writer) error { b := bufio.NewWriter(w) var err error @@ -440,6 +442,25 @@ func (s HeaderCapn) WriteJSON(w io.Writer) error { if err != nil { return err } + _, err = b.WriteString("\"epochStartMetaHash\":") + if err != nil { + return err + } + { + s := s.EpochStartMetaHash() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } _, err = b.WriteString("\"txCount\":") if err != nil { return err @@ -864,6 +885,25 @@ func (s HeaderCapn) WriteCapLit(w io.Writer) error { if err != nil { return err } + _, err = b.WriteString("epochStartMetaHash = ") + if err != nil { + return err + } + { + s := s.EpochStartMetaHash() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } _, err = b.WriteString("txCount = ") if err != nil { return err @@ -914,7 +954,7 @@ func (s HeaderCapn) MarshalCapLit() ([]byte, error) { type HeaderCapn_List C.PointerList func NewHeaderCapnList(s *C.Segment, sz int) HeaderCapn_List { - return HeaderCapn_List(s.NewCompositeList(40, 12, sz)) + return HeaderCapn_List(s.NewCompositeList(40, 13, sz)) } func (s HeaderCapn_List) Len() int { return C.PointerList(s).Len() } func (s HeaderCapn_List) At(i int) HeaderCapn { return HeaderCapn(C.PointerList(s).At(i).ToStruct()) } diff --git a/data/block/capnp/schema.metablock.capnp b/data/block/capnp/schema.metablock.capnp index 7531b7d747f..47a88f413b3 100644 --- a/data/block/capnp/schema.metablock.capnp +++ b/data/block/capnp/schema.metablock.capnp @@ -32,6 +32,19 @@ struct ShardDataCapn { nonce @9: UInt64; } +struct FinalizedHeadersCapn { + shardId @0: UInt32; + headerHash @1: Data; + rootHash @2: Data; + firstPendingMetaBlock @3: Data; + lastFinishedMetaBlock @4: Data; + pendingMiniBlockHeaders @5: List(ShardMiniBlockHeaderCapn); +} + +struct EpochStartCapn { + lastFinalizedHeaders @0: List(FinalizedHeadersCapn); +} + struct MetaBlockCapn { nonce @0: UInt64; epoch @1: UInt32; @@ -43,13 +56,14 @@ struct MetaBlockCapn { leaderSignature @7: Data; pubKeysBitmap @8: Data; prevHash @9: Data; - prevRandSeed @10: Data; + prevRandSeed @10: Data; randSeed @11: Data; rootHash @12: Data; validatorStatsRootHash @13: Data; txCount @14: UInt32; miniBlockHeaders @15: List(MiniBlockHeaderCapn); - chainid @16: Data; + epochStart @16: EpochStartCapn; + chainid @17: Data; } ##compile with: diff --git a/data/block/capnp/schema.metablock.capnp.go b/data/block/capnp/schema.metablock.capnp.go index 85fc7dc1f7d..5d19d1a38e3 100644 --- a/data/block/capnp/schema.metablock.capnp.go +++ b/data/block/capnp/schema.metablock.capnp.go @@ -992,11 +992,489 @@ func (s ShardDataCapn_List) ToArray() []ShardDataCapn { } func (s ShardDataCapn_List) Set(i int, item ShardDataCapn) { C.PointerList(s).Set(i, C.Object(item)) } +type FinalizedHeadersCapn C.Struct + +func NewFinalizedHeadersCapn(s *C.Segment) FinalizedHeadersCapn { + return FinalizedHeadersCapn(s.NewStruct(8, 5)) +} +func NewRootFinalizedHeadersCapn(s *C.Segment) FinalizedHeadersCapn { + return FinalizedHeadersCapn(s.NewRootStruct(8, 5)) +} +func AutoNewFinalizedHeadersCapn(s 
*C.Segment) FinalizedHeadersCapn { + return FinalizedHeadersCapn(s.NewStructAR(8, 5)) +} +func ReadRootFinalizedHeadersCapn(s *C.Segment) FinalizedHeadersCapn { + return FinalizedHeadersCapn(s.Root(0).ToStruct()) +} +func (s FinalizedHeadersCapn) ShardId() uint32 { return C.Struct(s).Get32(0) } +func (s FinalizedHeadersCapn) SetShardId(v uint32) { C.Struct(s).Set32(0, v) } +func (s FinalizedHeadersCapn) HeaderHash() []byte { return C.Struct(s).GetObject(0).ToData() } +func (s FinalizedHeadersCapn) SetHeaderHash(v []byte) { C.Struct(s).SetObject(0, s.Segment.NewData(v)) } +func (s FinalizedHeadersCapn) RootHash() []byte { return C.Struct(s).GetObject(1).ToData() } +func (s FinalizedHeadersCapn) SetRootHash(v []byte) { C.Struct(s).SetObject(1, s.Segment.NewData(v)) } +func (s FinalizedHeadersCapn) FirstPendingMetaBlock() []byte { return C.Struct(s).GetObject(2).ToData() } +func (s FinalizedHeadersCapn) SetFirstPendingMetaBlock(v []byte) { + C.Struct(s).SetObject(2, s.Segment.NewData(v)) +} +func (s FinalizedHeadersCapn) LastFinishedMetaBlock() []byte { return C.Struct(s).GetObject(3).ToData() } +func (s FinalizedHeadersCapn) SetLastFinishedMetaBlock(v []byte) { + C.Struct(s).SetObject(3, s.Segment.NewData(v)) +} +func (s FinalizedHeadersCapn) PendingMiniBlockHeaders() ShardMiniBlockHeaderCapn_List { + return ShardMiniBlockHeaderCapn_List(C.Struct(s).GetObject(4)) +} +func (s FinalizedHeadersCapn) SetPendingMiniBlockHeaders(v ShardMiniBlockHeaderCapn_List) { + C.Struct(s).SetObject(4, C.Object(v)) +} +func (s FinalizedHeadersCapn) WriteJSON(w io.Writer) error { + b := bufio.NewWriter(w) + var err error + var buf []byte + _ = buf + err = b.WriteByte('{') + if err != nil { + return err + } + _, err = b.WriteString("\"shardId\":") + if err != nil { + return err + } + { + s := s.ShardId() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"headerHash\":") + if err != nil { + return err + } + { + s := s.HeaderHash() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"rootHash\":") + if err != nil { + return err + } + { + s := s.RootHash() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"firstPendingMetaBlock\":") + if err != nil { + return err + } + { + s := s.FirstPendingMetaBlock() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"lastFinishedMetaBlock\":") + if err != nil { + return err + } + { + s := s.LastFinishedMetaBlock() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"pendingMiniBlockHeaders\":") + if err != nil { + return err + } + { + s := s.PendingMiniBlockHeaders() + { + err = b.WriteByte('[') + if err != nil { + return err + } + for i, s := range s.ToArray() { + if i != 0 { + _, err = b.WriteString(", ") + } + if err != nil { + return err + } + err = s.WriteJSON(b) + if err 
!= nil { + return err + } + } + err = b.WriteByte(']') + } + if err != nil { + return err + } + } + err = b.WriteByte('}') + if err != nil { + return err + } + err = b.Flush() + return err +} +func (s FinalizedHeadersCapn) MarshalJSON() ([]byte, error) { + b := bytes.Buffer{} + err := s.WriteJSON(&b) + return b.Bytes(), err +} +func (s FinalizedHeadersCapn) WriteCapLit(w io.Writer) error { + b := bufio.NewWriter(w) + var err error + var buf []byte + _ = buf + err = b.WriteByte('(') + if err != nil { + return err + } + _, err = b.WriteString("shardId = ") + if err != nil { + return err + } + { + s := s.ShardId() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("headerHash = ") + if err != nil { + return err + } + { + s := s.HeaderHash() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("rootHash = ") + if err != nil { + return err + } + { + s := s.RootHash() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("firstPendingMetaBlock = ") + if err != nil { + return err + } + { + s := s.FirstPendingMetaBlock() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("lastFinishedMetaBlock = ") + if err != nil { + return err + } + { + s := s.LastFinishedMetaBlock() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("pendingMiniBlockHeaders = ") + if err != nil { + return err + } + { + s := s.PendingMiniBlockHeaders() + { + err = b.WriteByte('[') + if err != nil { + return err + } + for i, s := range s.ToArray() { + if i != 0 { + _, err = b.WriteString(", ") + } + if err != nil { + return err + } + err = s.WriteCapLit(b) + if err != nil { + return err + } + } + err = b.WriteByte(']') + } + if err != nil { + return err + } + } + err = b.WriteByte(')') + if err != nil { + return err + } + err = b.Flush() + return err +} +func (s FinalizedHeadersCapn) MarshalCapLit() ([]byte, error) { + b := bytes.Buffer{} + err := s.WriteCapLit(&b) + return b.Bytes(), err +} + +type FinalizedHeadersCapn_List C.PointerList + +func NewFinalizedHeadersCapnList(s *C.Segment, sz int) FinalizedHeadersCapn_List { + return FinalizedHeadersCapn_List(s.NewCompositeList(8, 5, sz)) +} +func (s FinalizedHeadersCapn_List) Len() int { return C.PointerList(s).Len() } +func (s FinalizedHeadersCapn_List) At(i int) FinalizedHeadersCapn { + return FinalizedHeadersCapn(C.PointerList(s).At(i).ToStruct()) +} +func (s FinalizedHeadersCapn_List) ToArray() []FinalizedHeadersCapn { + n := s.Len() + a := make([]FinalizedHeadersCapn, n) + for i := 0; i < n; i++ { + a[i] = s.At(i) + } + return a +} +func (s FinalizedHeadersCapn_List) Set(i int, item FinalizedHeadersCapn) { + C.PointerList(s).Set(i, C.Object(item)) +} + +type EpochStartCapn C.Struct + +func NewEpochStartCapn(s *C.Segment) EpochStartCapn { return 
EpochStartCapn(s.NewStruct(0, 1)) } +func NewRootEpochStartCapn(s *C.Segment) EpochStartCapn { return EpochStartCapn(s.NewRootStruct(0, 1)) } +func AutoNewEpochStartCapn(s *C.Segment) EpochStartCapn { return EpochStartCapn(s.NewStructAR(0, 1)) } +func ReadRootEpochStartCapn(s *C.Segment) EpochStartCapn { return EpochStartCapn(s.Root(0).ToStruct()) } +func (s EpochStartCapn) LastFinalizedHeaders() FinalizedHeadersCapn_List { + return FinalizedHeadersCapn_List(C.Struct(s).GetObject(0)) +} +func (s EpochStartCapn) SetLastFinalizedHeaders(v FinalizedHeadersCapn_List) { + C.Struct(s).SetObject(0, C.Object(v)) +} +func (s EpochStartCapn) WriteJSON(w io.Writer) error { + b := bufio.NewWriter(w) + var err error + var buf []byte + _ = buf + err = b.WriteByte('{') + if err != nil { + return err + } + _, err = b.WriteString("\"lastFinalizedHeaders\":") + if err != nil { + return err + } + { + s := s.LastFinalizedHeaders() + { + err = b.WriteByte('[') + if err != nil { + return err + } + for i, s := range s.ToArray() { + if i != 0 { + _, err = b.WriteString(", ") + } + if err != nil { + return err + } + err = s.WriteJSON(b) + if err != nil { + return err + } + } + err = b.WriteByte(']') + } + if err != nil { + return err + } + } + err = b.WriteByte('}') + if err != nil { + return err + } + err = b.Flush() + return err +} +func (s EpochStartCapn) MarshalJSON() ([]byte, error) { + b := bytes.Buffer{} + err := s.WriteJSON(&b) + return b.Bytes(), err +} +func (s EpochStartCapn) WriteCapLit(w io.Writer) error { + b := bufio.NewWriter(w) + var err error + var buf []byte + _ = buf + err = b.WriteByte('(') + if err != nil { + return err + } + _, err = b.WriteString("lastFinalizedHeaders = ") + if err != nil { + return err + } + { + s := s.LastFinalizedHeaders() + { + err = b.WriteByte('[') + if err != nil { + return err + } + for i, s := range s.ToArray() { + if i != 0 { + _, err = b.WriteString(", ") + } + if err != nil { + return err + } + err = s.WriteCapLit(b) + if err != nil { + return err + } + } + err = b.WriteByte(']') + } + if err != nil { + return err + } + } + err = b.WriteByte(')') + if err != nil { + return err + } + err = b.Flush() + return err +} +func (s EpochStartCapn) MarshalCapLit() ([]byte, error) { + b := bytes.Buffer{} + err := s.WriteCapLit(&b) + return b.Bytes(), err +} + +type EpochStartCapn_List C.PointerList + +func NewEpochStartCapnList(s *C.Segment, sz int) EpochStartCapn_List { + return EpochStartCapn_List(s.NewCompositeList(0, 1, sz)) +} +func (s EpochStartCapn_List) Len() int { return C.PointerList(s).Len() } +func (s EpochStartCapn_List) At(i int) EpochStartCapn { + return EpochStartCapn(C.PointerList(s).At(i).ToStruct()) +} +func (s EpochStartCapn_List) ToArray() []EpochStartCapn { + n := s.Len() + a := make([]EpochStartCapn, n) + for i := 0; i < n; i++ { + a[i] = s.At(i) + } + return a +} +func (s EpochStartCapn_List) Set(i int, item EpochStartCapn) { C.PointerList(s).Set(i, C.Object(item)) } + type MetaBlockCapn C.Struct -func NewMetaBlockCapn(s *C.Segment) MetaBlockCapn { return MetaBlockCapn(s.NewStruct(32, 12)) } -func NewRootMetaBlockCapn(s *C.Segment) MetaBlockCapn { return MetaBlockCapn(s.NewRootStruct(32, 12)) } -func AutoNewMetaBlockCapn(s *C.Segment) MetaBlockCapn { return MetaBlockCapn(s.NewStructAR(32, 12)) } +func NewMetaBlockCapn(s *C.Segment) MetaBlockCapn { return MetaBlockCapn(s.NewStruct(32, 13)) } +func NewRootMetaBlockCapn(s *C.Segment) MetaBlockCapn { return MetaBlockCapn(s.NewRootStruct(32, 13)) } +func AutoNewMetaBlockCapn(s *C.Segment) MetaBlockCapn { 
return MetaBlockCapn(s.NewStructAR(32, 13)) } func ReadRootMetaBlockCapn(s *C.Segment) MetaBlockCapn { return MetaBlockCapn(s.Root(0).ToStruct()) } func (s MetaBlockCapn) Nonce() uint64 { return C.Struct(s).Get64(0) } func (s MetaBlockCapn) SetNonce(v uint64) { C.Struct(s).Set64(0, v) } @@ -1040,8 +1518,12 @@ func (s MetaBlockCapn) MiniBlockHeaders() MiniBlockHeaderCapn_List { func (s MetaBlockCapn) SetMiniBlockHeaders(v MiniBlockHeaderCapn_List) { C.Struct(s).SetObject(10, C.Object(v)) } -func (s MetaBlockCapn) Chainid() []byte { return C.Struct(s).GetObject(11).ToData() } -func (s MetaBlockCapn) SetChainid(v []byte) { C.Struct(s).SetObject(11, s.Segment.NewData(v)) } +func (s MetaBlockCapn) EpochStart() EpochStartCapn { + return EpochStartCapn(C.Struct(s).GetObject(11).ToStruct()) +} +func (s MetaBlockCapn) SetEpochStart(v EpochStartCapn) { C.Struct(s).SetObject(11, C.Object(v)) } +func (s MetaBlockCapn) Chainid() []byte { return C.Struct(s).GetObject(12).ToData() } +func (s MetaBlockCapn) SetChainid(v []byte) { C.Struct(s).SetObject(12, s.Segment.NewData(v)) } func (s MetaBlockCapn) WriteJSON(w io.Writer) error { b := bufio.NewWriter(w) var err error @@ -1397,6 +1879,21 @@ func (s MetaBlockCapn) WriteJSON(w io.Writer) error { if err != nil { return err } + _, err = b.WriteString("\"epochStart\":") + if err != nil { + return err + } + { + s := s.EpochStart() + err = s.WriteJSON(b) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } _, err = b.WriteString("\"chainid\":") if err != nil { return err @@ -1779,6 +2276,21 @@ func (s MetaBlockCapn) WriteCapLit(w io.Writer) error { if err != nil { return err } + _, err = b.WriteString("epochStart = ") + if err != nil { + return err + } + { + s := s.EpochStart() + err = s.WriteCapLit(b) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } _, err = b.WriteString("chainid = ") if err != nil { return err @@ -1810,7 +2322,7 @@ func (s MetaBlockCapn) MarshalCapLit() ([]byte, error) { type MetaBlockCapn_List C.PointerList func NewMetaBlockCapnList(s *C.Segment, sz int) MetaBlockCapn_List { - return MetaBlockCapn_List(s.NewCompositeList(32, 12, sz)) + return MetaBlockCapn_List(s.NewCompositeList(32, 13, sz)) } func (s MetaBlockCapn_List) Len() int { return C.PointerList(s).Len() } func (s MetaBlockCapn_List) At(i int) MetaBlockCapn { diff --git a/data/block/metaBlock.go b/data/block/metaBlock.go index de68397c0c6..8d71f2ffbac 100644 --- a/data/block/metaBlock.go +++ b/data/block/metaBlock.go @@ -18,7 +18,7 @@ type PeerAction uint8 // Constants mapping the actions that a node can take const ( - PeerRegistrantion PeerAction = iota + 1 + PeerRegistration PeerAction = iota + 1 PeerUnstaking PeerDeregistration PeerJailed @@ -29,7 +29,7 @@ const ( func (pa PeerAction) String() string { switch pa { - case PeerRegistrantion: + case PeerRegistration: return "PeerRegistration" case PeerUnstaking: return "PeerUnstaking" @@ -81,6 +81,21 @@ type ShardData struct { Nonce uint64 `capid:"9"` } +// EpochStartShardData hold the last finalized headers hash and state root hash +type EpochStartShardData struct { + ShardId uint32 `capid:"0"` + HeaderHash []byte `capid:"1"` + RootHash []byte `capid:"2"` + FirstPendingMetaBlock []byte `capid:"3"` + LastFinishedMetaBlock []byte `capid:"4"` + PendingMiniBlockHeaders []ShardMiniBlockHeader `capid:"5"` +} + +// EpochStart holds the block information for end-of-epoch +type EpochStart struct { + LastFinalizedHeaders 
[]EpochStartShardData `capid:"1"` +} + // MetaBlock holds the data that will be saved to the metachain each round type MetaBlock struct { Nonce uint64 `capid:"0"` @@ -99,7 +114,9 @@ type MetaBlock struct { ValidatorStatsRootHash []byte `capid:"13"` TxCount uint32 `capid:"14"` MiniBlockHeaders []MiniBlockHeader `capid:"15"` - ChainID []byte `capid:"16"` + ReceiptsHash []byte `capid:"16"` + EpochStart EpochStart `capid:"17"` + ChainID []byte `capid:"18"` } // Save saves the serialized data of a PeerData into a stream through Capnp protocol @@ -159,6 +176,25 @@ func (m *MetaBlock) Load(r io.Reader) error { return nil } +// Save saves the serialized data of a ShardData into a stream through Capnp protocol +func (e *EpochStart) Save(w io.Writer) error { + seg := capn.NewBuffer(nil) + EpochStartGoToCapn(seg, *e) + _, err := seg.WriteTo(w) + return err +} + +// Load loads the data from the stream into a EpochStart object through Capnp protocol +func (e *EpochStart) Load(r io.Reader) error { + capMsg, err := capn.ReadFromStream(r, nil) + if err != nil { + return err + } + z := capnp.ReadRootEpochStartCapn(capMsg) + EpochStartCapnToGo(z, e) + return nil +} + // PeerDataGoToCapn is a helper function to copy fields from a Peer Data object to a PeerDataCapn object func PeerDataGoToCapn(seg *capn.Segment, src *PeerData) capnp.PeerDataCapn { dest := capnp.AutoNewPeerDataCapn(seg) @@ -264,15 +300,88 @@ func ShardDataCapnToGo(src capnp.ShardDataCapn, dest *ShardData) *ShardData { return dest } +// EpochStartShardDataGoToCapn is a helper function to copy fields from a FinalizedHeaderHeader object to a +// EpochStartShardDataCapn object +func EpochStartShardDataGoToCapn(seg *capn.Segment, src *EpochStartShardData) capnp.FinalizedHeadersCapn { + dest := capnp.AutoNewFinalizedHeadersCapn(seg) + + dest.SetRootHash(src.RootHash) + dest.SetHeaderHash(src.HeaderHash) + dest.SetShardId(src.ShardId) + dest.SetFirstPendingMetaBlock(src.FirstPendingMetaBlock) + dest.SetLastFinishedMetaBlock(src.LastFinishedMetaBlock) + + if len(src.PendingMiniBlockHeaders) > 0 { + typedList := capnp.NewShardMiniBlockHeaderCapnList(seg, len(src.PendingMiniBlockHeaders)) + plist := capn.PointerList(typedList) + + for i, elem := range src.PendingMiniBlockHeaders { + _ = plist.Set(i, capn.Object(ShardMiniBlockHeaderGoToCapn(seg, &elem))) + } + dest.SetPendingMiniBlockHeaders(typedList) + } + + return dest +} + +// EpochStartShardDataCapnToGo is a helper function to copy fields from a FinalizedHeaderCapn object to a +// EpochStartShardData object +func EpochStartShardDataCapnToGo(src capnp.FinalizedHeadersCapn, dest *EpochStartShardData) *EpochStartShardData { + if dest == nil { + dest = &EpochStartShardData{} + } + + dest.RootHash = src.RootHash() + dest.HeaderHash = src.HeaderHash() + dest.ShardId = src.ShardId() + dest.FirstPendingMetaBlock = src.FirstPendingMetaBlock() + dest.LastFinishedMetaBlock = src.LastFinishedMetaBlock() + + n := src.PendingMiniBlockHeaders().Len() + dest.PendingMiniBlockHeaders = make([]ShardMiniBlockHeader, n) + for i := 0; i < n; i++ { + dest.PendingMiniBlockHeaders[i] = *ShardMiniBlockHeaderCapnToGo(src.PendingMiniBlockHeaders().At(i), nil) + } + + return dest +} + +// EpochStartGoToCapn is a helper function to copy fields from a ShardData object to a ShardDataCapn object +func EpochStartGoToCapn(seg *capn.Segment, src EpochStart) capnp.EpochStartCapn { + dest := capnp.AutoNewEpochStartCapn(seg) + + if len(src.LastFinalizedHeaders) > 0 { + typedList := capnp.NewFinalizedHeadersCapnList(seg, 
len(src.LastFinalizedHeaders)) + pList := capn.PointerList(typedList) + + for i, elem := range src.LastFinalizedHeaders { + _ = pList.Set(i, capn.Object(EpochStartShardDataGoToCapn(seg, &elem))) + } + dest.SetLastFinalizedHeaders(typedList) + } + + return dest +} + +// EpochStartCapnToGo is a helper function to copy fields from a ShardDataCapn object to a ShardData object +func EpochStartCapnToGo(src capnp.EpochStartCapn, dest *EpochStart) *EpochStart { + if dest == nil { + dest = &EpochStart{} + } + + n := src.LastFinalizedHeaders().Len() + dest.LastFinalizedHeaders = make([]EpochStartShardData, n) + for i := 0; i < n; i++ { + dest.LastFinalizedHeaders[i] = *EpochStartShardDataCapnToGo(src.LastFinalizedHeaders().At(i), nil) + } + + return dest +} + // MetaBlockGoToCapn is a helper function to copy fields from a MetaBlock object to a MetaBlockCapn object func MetaBlockGoToCapn(seg *capn.Segment, src *MetaBlock) capnp.MetaBlockCapn { dest := capnp.AutoNewMetaBlockCapn(seg) - dest.SetNonce(src.Nonce) - dest.SetEpoch(src.Epoch) - dest.SetRound(src.Round) - dest.SetTimeStamp(src.TimeStamp) - if len(src.ShardInfo) > 0 { typedList := capnp.NewShardDataCapnList(seg, len(src.ShardInfo)) plist := capn.PointerList(typedList) @@ -311,6 +420,11 @@ func MetaBlockGoToCapn(seg *capn.Segment, src *MetaBlock) capnp.MetaBlockCapn { dest.SetRootHash(src.RootHash) dest.SetValidatorStatsRootHash(src.ValidatorStatsRootHash) dest.SetTxCount(src.TxCount) + dest.SetNonce(src.Nonce) + dest.SetEpoch(src.Epoch) + dest.SetRound(src.Round) + dest.SetTimeStamp(src.TimeStamp) + dest.SetEpochStart(EpochStartGoToCapn(seg, src.EpochStart)) dest.SetLeaderSignature(src.LeaderSignature) dest.SetChainid(src.ChainID) @@ -322,10 +436,6 @@ func MetaBlockCapnToGo(src capnp.MetaBlockCapn, dest *MetaBlock) *MetaBlock { if dest == nil { dest = &MetaBlock{} } - dest.Nonce = src.Nonce() - dest.Epoch = src.Epoch() - dest.Round = src.Round() - dest.TimeStamp = src.TimeStamp() n := src.ShardInfo().Len() dest.ShardInfo = make([]ShardData, n) @@ -353,6 +463,11 @@ func MetaBlockCapnToGo(src capnp.MetaBlockCapn, dest *MetaBlock) *MetaBlock { dest.ValidatorStatsRootHash = src.ValidatorStatsRootHash() dest.TxCount = src.TxCount() dest.LeaderSignature = src.LeaderSignature() + dest.Nonce = src.Nonce() + dest.Epoch = src.Epoch() + dest.Round = src.Round() + dest.TimeStamp = src.TimeStamp() + dest.EpochStart = *EpochStartCapnToGo(src.EpochStart(), nil) dest.ChainID = src.Chainid() return dest @@ -433,6 +548,15 @@ func (m *MetaBlock) GetTxCount() uint32 { return m.TxCount } +// GetReceiptsHash returns the hash of the receipts and intra-shard smart contract results +func (m *MetaBlock) GetReceiptsHash() []byte { + return m.ReceiptsHash +} + +// SetShardID sets header shard ID +func (m *MetaBlock) SetShardID(_ uint32) { +} + // SetNonce sets header nonce func (m *MetaBlock) SetNonce(n uint64) { m.Nonce = n @@ -548,6 +672,11 @@ func (m *MetaBlock) ItemsInHeader() uint32 { return uint32(itemsInHeader) } +// IsStartOfEpochBlock verifies if the block is of type start of epoch +func (m *MetaBlock) IsStartOfEpochBlock() bool { + return len(m.EpochStart.LastFinalizedHeaders) > 0 +} + // ItemsInBody gets the number of items(hashes) added in block body func (m *MetaBlock) ItemsInBody() uint32 { return m.TxCount diff --git a/data/block/metaBlock_test.go b/data/block/metaBlock_test.go index 96267c33451..eb816b2f95d 100644 --- a/data/block/metaBlock_test.go +++ b/data/block/metaBlock_test.go @@ -15,7 +15,7 @@ import ( func TestPeerData_SaveLoad(t *testing.T) { pd 
:= block.PeerData{ PublicKey: []byte("public key"), - Action: block.PeerRegistrantion, + Action: block.PeerRegistration, TimeStamp: uint64(1234), ValueChange: big.NewInt(1), Address: []byte("address"), @@ -57,11 +57,42 @@ func TestShardData_SaveLoad(t *testing.T) { assert.Equal(t, loadSd, sd) } +func TestEpochStart_SaveLoad(t *testing.T) { + + mbh := block.ShardMiniBlockHeader{ + Hash: []byte("miniblock hash"), + SenderShardID: uint32(0), + ReceiverShardID: uint32(1), + TxCount: uint32(1), + } + + lastFinalHdr := block.EpochStartShardData{ + ShardId: 0, + HeaderHash: []byte("headerhash"), + RootHash: []byte("roothash"), + FirstPendingMetaBlock: []byte("firstPending"), + LastFinishedMetaBlock: []byte("lastfinished"), + PendingMiniBlockHeaders: []block.ShardMiniBlockHeader{mbh}, + } + + epochStart := block.EpochStart{ + LastFinalizedHeaders: []block.EpochStartShardData{lastFinalHdr}, + } + + var b bytes.Buffer + _ = epochStart.Save(&b) + + loadEpoch := block.EpochStart{} + _ = loadEpoch.Load(&b) + + assert.Equal(t, loadEpoch, epochStart) +} + func TestMetaBlock_SaveLoad(t *testing.T) { pd := block.PeerData{ Address: []byte("address"), PublicKey: []byte("public key"), - Action: block.PeerRegistrantion, + Action: block.PeerRegistration, TimeStamp: uint64(1234), ValueChange: big.NewInt(1), } @@ -90,6 +121,15 @@ func TestMetaBlock_SaveLoad(t *testing.T) { TxCount: uint32(10), } + lastFinalHdr := block.EpochStartShardData{ + ShardId: 0, + HeaderHash: []byte("headerhash"), + RootHash: []byte("roothash"), + FirstPendingMetaBlock: []byte("firstPending"), + LastFinishedMetaBlock: []byte("lastfinished"), + PendingMiniBlockHeaders: []block.ShardMiniBlockHeader{mbh}, + } + mb := block.MetaBlock{ Nonce: uint64(1), Epoch: uint32(1), @@ -108,6 +148,9 @@ func TestMetaBlock_SaveLoad(t *testing.T) { MiniBlockHeaders: []block.MiniBlockHeader{mbHdr}, LeaderSignature: []byte("leader_sign"), ChainID: []byte("chain ID"), + EpochStart: block.EpochStart{ + LastFinalizedHeaders: []block.EpochStartShardData{lastFinalHdr}, + }, } var b bytes.Buffer err := mb.Save(&b) @@ -117,7 +160,7 @@ func TestMetaBlock_SaveLoad(t *testing.T) { err = loadMb.Load(&b) assert.Nil(t, err) - assert.Equal(t, loadMb, mb) + assert.Equal(t, mb, loadMb) } func TestMetaBlock_GetEpoch(t *testing.T) { diff --git a/data/errors.go b/data/errors.go index 3bceb01fe2f..da0cdd58c11 100644 --- a/data/errors.go +++ b/data/errors.go @@ -43,6 +43,15 @@ var ErrNilShardCoordinator = errors.New("nil shard coordinator") // ErrNilNodesCoordinator signals that nil shard coordinator was provided var ErrNilNodesCoordinator = errors.New("nil nodes coordinator") +// ErrNilMarshalizer is raised when the NewTrie() function is called, but a marshalizer isn't provided +var ErrNilMarshalizer = errors.New("no marshalizer provided") + +// ErrNilDatabase is raised when a database operation is called, but no database is provided +var ErrNilDatabase = errors.New("no database provided") + +// ErrInvalidCacheSize is raised when the given size for the cache is invalid +var ErrInvalidCacheSize = errors.New("cache size is invalid") + // ErrInvalidValue signals that an invalid value has been provided such as NaN to an integer field var ErrInvalidValue = errors.New("invalid value") diff --git a/data/interface.go b/data/interface.go index 0c873fb5672..4ebecd96154 100644 --- a/data/interface.go +++ b/data/interface.go @@ -2,6 +2,19 @@ package data import ( "math/big" + + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" +) + +// 
TriePruningIdentifier is the type for trie pruning identifiers +type TriePruningIdentifier byte + +const ( + // OldRoot is appended to the key when oldHashes are added to the evictionWaitingList + OldRoot TriePruningIdentifier = 0 + // NewRoot is appended to the key when newHashes are added to the evictionWaitingList + NewRoot TriePruningIdentifier = 1 ) // HeaderHandler defines getters and setters for header data holder @@ -21,7 +34,9 @@ type HeaderHandler interface { GetChainID() []byte GetTimeStamp() uint64 GetTxCount() uint32 + GetReceiptsHash() []byte + SetShardID(shId uint32) SetNonce(n uint64) SetEpoch(e uint32) SetRound(r uint64) @@ -37,6 +52,7 @@ type HeaderHandler interface { SetChainID(chainID []byte) SetTxCount(txCount uint32) + IsStartOfEpochBlock() bool GetMiniBlockHeadersWithDst(destId uint32) map[string]uint32 IsInterfaceNil() bool @@ -81,14 +97,14 @@ type TransactionHandler interface { GetValue() *big.Int GetNonce() uint64 - GetData() string + GetData() []byte GetRecvAddress() []byte GetSndAddress() []byte GetGasLimit() uint64 GetGasPrice() uint64 SetValue(*big.Int) - SetData(string) + SetData([]byte) SetRecvAddress([]byte) SetSndAddress([]byte) } @@ -105,7 +121,16 @@ type Trie interface { Recreate(root []byte) (Trie, error) String() string DeepClone() (Trie, error) + CancelPrune(rootHash []byte, identifier TriePruningIdentifier) + Prune(rootHash []byte, identifier TriePruningIdentifier) error + TakeSnapshot(rootHash []byte) + SetCheckpoint(rootHash []byte) + ResetOldHashes() [][]byte + AppendToOldHashes([][]byte) + Database() DBWriteCacher + GetSerializedNodes([]byte, uint64) ([][]byte, error) GetAllLeaves() (map[string][]byte, error) + IsPruningEnabled() bool IsInterfaceNil() bool } @@ -113,5 +138,41 @@ type Trie interface { type DBWriteCacher interface { Put(key, val []byte) error Get(key []byte) ([]byte, error) + Remove(key []byte) error + IsInterfaceNil() bool +} + +// DBRemoveCacher is used to cache keys that will be deleted from the database +type DBRemoveCacher interface { + Put([]byte, [][]byte) error + Evict([]byte) ([][]byte, error) + GetSize() uint + IsInterfaceNil() bool +} + +// TrieSyncer synchronizes the trie, asking on the network for the missing nodes +type TrieSyncer interface { + StartSyncing(rootHash []byte) error + IsInterfaceNil() bool +} + +// StorageManager manages all trie storage operations +type StorageManager interface { + Database() DBWriteCacher + SetDatabase(cacher DBWriteCacher) + TakeSnapshot([]byte, marshal.Marshalizer, hashing.Hasher) + SetCheckpoint([]byte, marshal.Marshalizer, hashing.Hasher) + Prune([]byte) error + CancelPrune([]byte) + MarkForEviction([]byte, [][]byte) error + GetDbThatContainsHash([]byte) DBWriteCacher + Clone() StorageManager + IsPruningEnabled() bool + IsInterfaceNil() bool +} + +// TrieFactory creates new tries +type TrieFactory interface { + Create() (Trie, error) IsInterfaceNil() bool } diff --git a/data/mock/accountWrapperMock.go b/data/mock/accountWrapperMock.go index f3a38e5dbe0..8d35879a61f 100644 --- a/data/mock/accountWrapperMock.go +++ b/data/mock/accountWrapperMock.go @@ -25,7 +25,7 @@ func NewAccountWrapMock(adr state.AddressContainer, tracker state.AccountTracker return &AccountWrapMock{ address: adr, tracker: tracker, - trackableDataTrie: state.NewTrackableDataTrie(nil), + trackableDataTrie: state.NewTrackableDataTrie([]byte("identifier"), nil), } } diff --git a/data/mock/addressMock.go b/data/mock/addressMock.go index 163d3d7211e..e9d6636507c 100644 --- a/data/mock/addressMock.go +++ 
b/data/mock/addressMock.go @@ -24,7 +24,7 @@ func NewAddressMock() *AddressMock { buff := make([]byte, HasherMock{}.Size()) mutex.Lock() - r.Read(buff) + _, _ = r.Read(buff) mutex.Unlock() return &AddressMock{bytes: buff} @@ -42,8 +42,5 @@ func (address *AddressMock) Bytes() []byte { // IsInterfaceNil returns true if there is no value under the interface func (address *AddressMock) IsInterfaceNil() bool { - if address == nil { - return true - } - return false + return address == nil } diff --git a/data/mock/evictionWaitingListMock.go b/data/mock/evictionWaitingListMock.go new file mode 100644 index 00000000000..9ddbc4c030d --- /dev/null +++ b/data/mock/evictionWaitingListMock.go @@ -0,0 +1,93 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/storage" +) + +// EvictionWaitingList is a structure that caches keys that need to be removed from a certain database. +// If the cache is full, the keys will be stored in the underlying database. Writing at the same key in +// cacher and db will overwrite the previous values. This structure is not concurrent safe. +type EvictionWaitingList struct { + Cache map[string][][]byte + CacheSize uint + Db storage.Persister + Marshalizer marshal.Marshalizer +} + +// NewEvictionWaitingList creates a new instance of evictionWaitingList +func NewEvictionWaitingList(size uint, db storage.Persister, marshalizer marshal.Marshalizer) (*EvictionWaitingList, error) { + if size < 1 { + return nil, data.ErrInvalidCacheSize + } + if db == nil || db.IsInterfaceNil() { + return nil, data.ErrNilDatabase + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, data.ErrNilMarshalizer + } + + return &EvictionWaitingList{ + Cache: make(map[string][][]byte), + CacheSize: size, + Db: db, + Marshalizer: marshalizer, + }, nil +} + +// Put stores the given hashes in the eviction waiting list, in the position given by the root hash +func (ewl *EvictionWaitingList) Put(rootHash []byte, hashes [][]byte) error { + if uint(len(ewl.Cache)) < ewl.CacheSize { + ewl.Cache[string(rootHash)] = hashes + return nil + } + + marshalizedHashes, err := ewl.Marshalizer.Marshal(hashes) + if err != nil { + return err + } + + err = ewl.Db.Put(rootHash, marshalizedHashes) + if err != nil { + return err + } + + return nil +} + +// Evict returns and removes from the waiting list all the hashes from the position given by the root hash +func (ewl *EvictionWaitingList) Evict(rootHash []byte) ([][]byte, error) { + hashes, ok := ewl.Cache[string(rootHash)] + if ok { + delete(ewl.Cache, string(rootHash)) + return hashes, nil + } + + marshalizedHashes, err := ewl.Db.Get(rootHash) + if err != nil { + return nil, err + } + + err = ewl.Marshalizer.Unmarshal(&hashes, marshalizedHashes) + if err != nil { + return nil, err + } + + err = ewl.Db.Remove(rootHash) + if err != nil { + return nil, err + } + + return hashes, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ewl *EvictionWaitingList) IsInterfaceNil() bool { + return ewl == nil +} + +// GetSize returns the size of the cache +func (ewl *EvictionWaitingList) GetSize() uint { + return ewl.CacheSize +} diff --git a/data/mock/hasherMock.go b/data/mock/hasherMock.go index 7bdb135d985..bd7ed68ca2a 100644 --- a/data/mock/hasherMock.go +++ b/data/mock/hasherMock.go @@ -11,7 +11,7 @@ type HasherMock struct { // Compute will output the SHA's equivalent of the input string func (sha HasherMock) Compute(s 
string) []byte { h := sha256.New() - h.Write([]byte(s)) + _, _ = h.Write([]byte(s)) return h.Sum(nil) } @@ -29,9 +29,6 @@ func (HasherMock) Size() int { } // IsInterfaceNil returns true if there is no value under the interface -func (sha *HasherMock) IsInterfaceNil() bool { - if sha == nil { - return true - } +func (sha HasherMock) IsInterfaceNil() bool { return false } diff --git a/data/mock/hasherMock127.go b/data/mock/hasherMock127.go index 10ec57464e2..192a031a947 100644 --- a/data/mock/hasherMock127.go +++ b/data/mock/hasherMock127.go @@ -24,8 +24,5 @@ func (HasherMock127) Size() int { // IsInterfaceNil returns true if there is no value under the interface func (hash *HasherMock127) IsInterfaceNil() bool { - if hash == nil { - return true - } - return false + return hash == nil } diff --git a/data/mock/keccakMock.go b/data/mock/keccakMock.go index f415325837a..30f3535e6e9 100644 --- a/data/mock/keccakMock.go +++ b/data/mock/keccakMock.go @@ -16,7 +16,7 @@ func (k KeccakMock) Compute(s string) []byte { return k.EmptyHash() } h := sha3.NewLegacyKeccak256() - h.Write([]byte(s)) + _, _ = h.Write([]byte(s)) return h.Sum(nil) } @@ -35,8 +35,5 @@ func (KeccakMock) Size() int { // IsInterfaceNil returns true if there is no value under the interface func (k *KeccakMock) IsInterfaceNil() bool { - if k == nil { - return true - } - return false + return k == nil } diff --git a/data/mock/memDbMock.go b/data/mock/memDbMock.go index ea1bd03ccb9..6a680ac9015 100644 --- a/data/mock/memDbMock.go +++ b/data/mock/memDbMock.go @@ -15,11 +15,11 @@ type MemDbMock struct { } // NewMemDbMock creates a new memorydb object -func NewMemDbMock() (*MemDbMock, error) { +func NewMemDbMock() *MemDbMock { return &MemDbMock{ db: make(map[string][]byte), mutx: sync.RWMutex{}, - }, nil + } } // Put adds the value to the (key, val) storage medium @@ -47,13 +47,16 @@ func (s *MemDbMock) Get(key []byte) ([]byte, error) { } // Has returns true if the given key is present in the persistence medium, false otherwise -func (s *MemDbMock) Has(key []byte) (bool, error) { +func (s *MemDbMock) Has(key []byte) error { s.mutx.RLock() defer s.mutx.RUnlock() _, ok := s.db[string(key)] + if !ok { + return errors.New("key not present") + } - return ok, nil + return nil } // Init initializes the storage medium and prepares it for usage @@ -88,6 +91,11 @@ func (s *MemDbMock) Destroy() error { return nil } +// DestroyClosed removes the already closed storage medium stored data +func (s *MemDbMock) DestroyClosed() error { + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (s *MemDbMock) IsInterfaceNil() bool { if s == nil { diff --git a/data/mock/pathManagerStub.go b/data/mock/pathManagerStub.go new file mode 100644 index 00000000000..00d17097055 --- /dev/null +++ b/data/mock/pathManagerStub.go @@ -0,0 +1,30 @@ +package mock + +import ( + "fmt" +) + +type PathManagerStub struct { + PathForEpochCalled func(shardId string, epoch uint32, identifier string) string + PathForStaticCalled func(shardId string, identifier string) string +} + +func (p *PathManagerStub) PathForEpoch(shardId string, epoch uint32, identifier string) string { + if p.PathForEpochCalled != nil { + return p.PathForEpochCalled(shardId, epoch, identifier) + } + + return fmt.Sprintf("Epoch_%d/Shard_%s/%s", epoch, shardId, identifier) +} + +func (p *PathManagerStub) PathForStatic(shardId string, identifier string) string { + if p.PathForEpochCalled != nil { + return p.PathForStaticCalled(shardId, identifier) + } + + return 
fmt.Sprintf("Static/Shard_%s/%s", shardId, identifier) +} + +func (p *PathManagerStub) IsInterfaceNil() bool { + return p == nil +} diff --git a/data/mock/shardedDataStub.go b/data/mock/shardedDataStub.go index 688a94904dd..3fa0868838e 100644 --- a/data/mock/shardedDataStub.go +++ b/data/mock/shardedDataStub.go @@ -47,10 +47,6 @@ func (sd *ShardedDataStub) MergeShardStores(sourceCacheId, destCacheId string) { sd.MergeShardStoresCalled(sourceCacheId, destCacheId) } -func (sd *ShardedDataStub) MoveData(sourceCacheId, destCacheId string, key [][]byte) { - sd.MoveDataCalled(sourceCacheId, destCacheId, key) -} - func (sd *ShardedDataStub) Clear() { sd.ClearCalled() } diff --git a/data/mock/trieNodesResolverStub.go b/data/mock/trieNodesResolverStub.go new file mode 100644 index 00000000000..cced0b0e770 --- /dev/null +++ b/data/mock/trieNodesResolverStub.go @@ -0,0 +1,31 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/p2p" +) + +type TrieNodesResolverStub struct { + RequestDataFromHashCalled func(hash []byte) error + ProcessReceivedMessageCalled func(message p2p.MessageP2P) error +} + +func (tnrs *TrieNodesResolverStub) RequestDataFromHash(hash []byte) error { + if tnrs.RequestDataFromHashCalled != nil { + return tnrs.RequestDataFromHashCalled(hash) + } + + return errNotImplemented +} + +func (tnrs *TrieNodesResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { + if tnrs.ProcessReceivedMessageCalled != nil { + return tnrs.ProcessReceivedMessageCalled(message) + } + + return errNotImplemented +} + +// IsInterfaceNil returns true if there is no value under the interface +func (tnrs *TrieNodesResolverStub) IsInterfaceNil() bool { + return tnrs == nil +} diff --git a/data/mock/trieStub.go b/data/mock/trieStub.go index 8d81238bec3..b80e196b9c3 100644 --- a/data/mock/trieStub.go +++ b/data/mock/trieStub.go @@ -9,16 +9,25 @@ import ( var errNotImplemented = errors.New("not implemented") type TrieStub struct { - GetCalled func(key []byte) ([]byte, error) - UpdateCalled func(key, value []byte) error - DeleteCalled func(key []byte) error - RootCalled func() ([]byte, error) - ProveCalled func(key []byte) ([][]byte, error) - VerifyProofCalled func(proofs [][]byte, key []byte) (bool, error) - CommitCalled func() error - RecreateCalled func(root []byte) (data.Trie, error) - DeepCloneCalled func() (data.Trie, error) - GetAllLeavesCalled func() (map[string][]byte, error) + GetCalled func(key []byte) ([]byte, error) + UpdateCalled func(key, value []byte) error + DeleteCalled func(key []byte) error + RootCalled func() ([]byte, error) + ProveCalled func(key []byte) ([][]byte, error) + VerifyProofCalled func(proofs [][]byte, key []byte) (bool, error) + CommitCalled func() error + RecreateCalled func(root []byte) (data.Trie, error) + DeepCloneCalled func() (data.Trie, error) + CancelPruneCalled func(rootHash []byte, identifier data.TriePruningIdentifier) + PruneCalled func(rootHash []byte, identifier data.TriePruningIdentifier) error + ResetOldHashesCalled func() [][]byte + AppendToOldHashesCalled func([][]byte) + TakeSnapshotCalled func(rootHash []byte) + SetCheckpointCalled func(rootHash []byte) + GetSerializedNodesCalled func([]byte, uint64) ([][]byte, error) + DatabaseCalled func() data.DBWriteCacher + GetAllLeavesCalled func() (map[string][]byte, error) + IsPruningEnabledCalled func() bool } func (ts *TrieStub) Get(key []byte) ([]byte, error) { @@ -103,8 +112,70 @@ func (ts *TrieStub) GetAllLeaves() (map[string][]byte, error) { // IsInterfaceNil 
returns true if there is no value under the interface func (ts *TrieStub) IsInterfaceNil() bool { - if ts == nil { - return true + return ts == nil +} + +// CancelPrune invalidates the hashes that correspond to the given root hash from the eviction waiting list +func (ts *TrieStub) CancelPrune(rootHash []byte, identifier data.TriePruningIdentifier) { + if ts.CancelPruneCalled != nil { + ts.CancelPruneCalled(rootHash, identifier) + } +} + +// Prune removes from the database all the old hashes that correspond to the given root hash +func (ts *TrieStub) Prune(rootHash []byte, identifier data.TriePruningIdentifier) error { + if ts.PruneCalled != nil { + return ts.PruneCalled(rootHash, identifier) + } + + return errNotImplemented +} + +// ResetOldHashes resets the oldHashes and oldRoot variables and returns the old hashes +func (ts *TrieStub) ResetOldHashes() [][]byte { + if ts.ResetOldHashesCalled != nil { + return ts.ResetOldHashesCalled() + } + + return nil +} + +// AppendToOldHashes appends the given hashes to the trie's oldHashes variable +func (ts *TrieStub) AppendToOldHashes(hashes [][]byte) { + if ts.AppendToOldHashesCalled != nil { + ts.AppendToOldHashesCalled(hashes) + } +} + +func (ts *TrieStub) TakeSnapshot(rootHash []byte) { + if ts.TakeSnapshotCalled != nil { + ts.TakeSnapshotCalled(rootHash) + } +} + +func (ts *TrieStub) SetCheckpoint(rootHash []byte) { + if ts.SetCheckpointCalled != nil { + ts.SetCheckpointCalled(rootHash) + } +} + +func (ts *TrieStub) GetSerializedNodes(hash []byte, maxBuffToSend uint64) ([][]byte, error) { + if ts.GetSerializedNodesCalled != nil { + return ts.GetSerializedNodesCalled(hash, maxBuffToSend) + } + return nil, nil +} + +func (ts *TrieStub) Database() data.DBWriteCacher { + if ts.DatabaseCalled != nil { + return ts.DatabaseCalled() + } + return nil +} + +func (ts *TrieStub) IsPruningEnabled() bool { + if ts.IsPruningEnabledCalled != nil { + return ts.IsPruningEnabledCalled() } return false } diff --git a/data/receipt/capnp/schema.capnp b/data/receipt/capnp/schema.capnp new file mode 100644 index 00000000000..6afd5adb1ce --- /dev/null +++ b/data/receipt/capnp/schema.capnp @@ -0,0 +1,16 @@ +@0xa6e50837d4563fc2; +using Go = import "/go.capnp"; +$Go.package("capnp"); +$Go.import("_"); + +struct ReceiptCapn { + value @0: Data; + sndAddr @1: Data; + data @2: Data; + txHash @3: Data; +} + +##compile with: +## +## +## capnpc -I$GOPATH/src/github.com/glycerine/go-capnproto -ogo $GOPATH/src/github.com/ElrondNetwork/elrond-go/data/receipt/capnp/schema.capnp \ No newline at end of file diff --git a/data/receipt/capnp/schema.capnp.go b/data/receipt/capnp/schema.capnp.go new file mode 100644 index 00000000000..555965d2ce2 --- /dev/null +++ b/data/receipt/capnp/schema.capnp.go @@ -0,0 +1,230 @@ +package capnp + +// AUTO GENERATED - DO NOT EDIT + +import ( + "bufio" + "bytes" + "encoding/json" + "io" + + C "github.com/glycerine/go-capnproto" +) + +type ReceiptCapn C.Struct + +func NewReceiptCapn(s *C.Segment) ReceiptCapn { return ReceiptCapn(s.NewStruct(0, 4)) } +func NewRootReceiptCapn(s *C.Segment) ReceiptCapn { return ReceiptCapn(s.NewRootStruct(0, 4)) } +func AutoNewReceiptCapn(s *C.Segment) ReceiptCapn { return ReceiptCapn(s.NewStructAR(0, 4)) } +func ReadRootReceiptCapn(s *C.Segment) ReceiptCapn { return ReceiptCapn(s.Root(0).ToStruct()) } +func (s ReceiptCapn) Value() []byte { return C.Struct(s).GetObject(0).ToData() } +func (s ReceiptCapn) SetValue(v []byte) { C.Struct(s).SetObject(0, s.Segment.NewData(v)) } +func (s ReceiptCapn) SndAddr() []byte { return 
C.Struct(s).GetObject(1).ToData() } +func (s ReceiptCapn) SetSndAddr(v []byte) { C.Struct(s).SetObject(1, s.Segment.NewData(v)) } +func (s ReceiptCapn) Data() []byte { return C.Struct(s).GetObject(2).ToData() } +func (s ReceiptCapn) SetData(v []byte) { C.Struct(s).SetObject(2, s.Segment.NewData(v)) } +func (s ReceiptCapn) TxHash() []byte { return C.Struct(s).GetObject(3).ToData() } +func (s ReceiptCapn) SetTxHash(v []byte) { C.Struct(s).SetObject(3, s.Segment.NewData(v)) } +func (s ReceiptCapn) WriteJSON(w io.Writer) error { + b := bufio.NewWriter(w) + var err error + var buf []byte + _ = buf + err = b.WriteByte('{') + if err != nil { + return err + } + _, err = b.WriteString("\"value\":") + if err != nil { + return err + } + { + s := s.Value() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"sndAddr\":") + if err != nil { + return err + } + { + s := s.SndAddr() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"data\":") + if err != nil { + return err + } + { + s := s.Data() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"txHash\":") + if err != nil { + return err + } + { + s := s.TxHash() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte('}') + if err != nil { + return err + } + err = b.Flush() + return err +} +func (s ReceiptCapn) MarshalJSON() ([]byte, error) { + b := bytes.Buffer{} + err := s.WriteJSON(&b) + return b.Bytes(), err +} +func (s ReceiptCapn) WriteCapLit(w io.Writer) error { + b := bufio.NewWriter(w) + var err error + var buf []byte + _ = buf + err = b.WriteByte('(') + if err != nil { + return err + } + _, err = b.WriteString("value = ") + if err != nil { + return err + } + { + s := s.Value() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("sndAddr = ") + if err != nil { + return err + } + { + s := s.SndAddr() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("data = ") + if err != nil { + return err + } + { + s := s.Data() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("txHash = ") + if err != nil { + return err + } + { + s := s.TxHash() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(')') + if err != nil { + return err + } + err = b.Flush() + return err +} +func (s ReceiptCapn) MarshalCapLit() ([]byte, error) { + b := bytes.Buffer{} + err := s.WriteCapLit(&b) + return b.Bytes(), err +} + +type ReceiptCapn_List C.PointerList + +func NewReceiptCapnList(s *C.Segment, sz int) ReceiptCapn_List { + 
return ReceiptCapn_List(s.NewCompositeList(0, 4, sz)) +} +func (s ReceiptCapn_List) Len() int { return C.PointerList(s).Len() } +func (s ReceiptCapn_List) At(i int) ReceiptCapn { return ReceiptCapn(C.PointerList(s).At(i).ToStruct()) } +func (s ReceiptCapn_List) ToArray() []ReceiptCapn { + n := s.Len() + a := make([]ReceiptCapn, n) + for i := 0; i < n; i++ { + a[i] = s.At(i) + } + return a +} +func (s ReceiptCapn_List) Set(i int, item ReceiptCapn) { C.PointerList(s).Set(i, C.Object(item)) } diff --git a/data/receipt/receipt.go b/data/receipt/receipt.go new file mode 100644 index 00000000000..e5bcad56d8d --- /dev/null +++ b/data/receipt/receipt.go @@ -0,0 +1,131 @@ +package receipt + +import ( + "io" + "math/big" + + "github.com/ElrondNetwork/elrond-go/data/receipt/capnp" + capn "github.com/glycerine/go-capnproto" +) + +// Receipt holds all the data needed for a transaction receipt +type Receipt struct { + Value *big.Int `capid:"0" json:"value"` + SndAddr []byte `capid:"1" json:"sender"` + Data []byte `capid:"2" json:"data,omitempty"` + TxHash []byte `capid:"3" json:"txHash"` +} + +// Save saves the serialized data of a Receipt into a stream through Capnp protocol +func (rpt *Receipt) Save(w io.Writer) error { + seg := capn.NewBuffer(nil) + ReceiptGoToCapn(seg, rpt) + _, err := seg.WriteTo(w) + return err +} + +// Load loads the data from the stream into a Receipt object through Capnp protocol +func (rpt *Receipt) Load(r io.Reader) error { + capMsg, err := capn.ReadFromStream(r, nil) + if err != nil { + return err + } + + z := capnp.ReadRootReceiptCapn(capMsg) + ReceiptCapnToGo(z, rpt) + return nil +} + +// ReceiptCapnToGo is a helper function to copy fields from a ReceiptCapn object to a Receipt object +func ReceiptCapnToGo(src capnp.ReceiptCapn, dest *Receipt) *Receipt { + if dest == nil { + dest = &Receipt{} + } + + if dest.Value == nil { + dest.Value = big.NewInt(0) + } + + err := dest.Value.GobDecode(src.Value()) + if err != nil { + return nil + } + + dest.SndAddr = src.SndAddr() + dest.Data = src.Data() + dest.TxHash = src.TxHash() + + return dest +} + +// ReceiptGoToCapn is a helper function to copy fields from a Receipt object to a ReceiptCapn object +func ReceiptGoToCapn(seg *capn.Segment, src *Receipt) capnp.ReceiptCapn { + dest := capnp.AutoNewReceiptCapn(seg) + + value, _ := src.Value.GobEncode() + dest.SetValue(value) + dest.SetSndAddr(src.SndAddr) + dest.SetData(src.Data) + dest.SetTxHash(src.TxHash) + + return dest +} + +// IsInterfaceNil verifies if underlying object is nil +func (rpt *Receipt) IsInterfaceNil() bool { + return rpt == nil +} + +// GetValue returns the value of the receipt +func (rpt *Receipt) GetValue() *big.Int { + return rpt.Value +} + +// GetNonce returns the nonce of the receipt +func (rpt *Receipt) GetNonce() uint64 { + return 0 +} + +// GetData returns the data of the receipt +func (rpt *Receipt) GetData() []byte { + return rpt.Data +} + +// GetRecvAddress returns the receiver address from the receipt +func (rpt *Receipt) GetRecvAddress() []byte { + return rpt.SndAddr +} + +// GetSndAddress returns the sender address from the receipt +func (rpt *Receipt) GetSndAddress() []byte { + return rpt.SndAddr +} + +// GetGasLimit returns the gas limit of the receipt +func (rpt *Receipt) GetGasLimit() uint64 { + return 0 +} + +// GetGasPrice returns the gas price of the receipt +func (rpt *Receipt) GetGasPrice() uint64 { + return 0 +} + +// SetValue sets the value of the receipt +func (rpt *Receipt) SetValue(value *big.Int) { + rpt.Value = value +} + +// SetData 
sets the data of the receipt +func (rpt *Receipt) SetData(data []byte) { + rpt.Data = data +} + +// SetRecvAddress sets the receiver address of the receipt +func (rpt *Receipt) SetRecvAddress(_ []byte) { +} + +// SetSndAddress sets the sender address of the receipt +func (rpt *Receipt) SetSndAddress(addr []byte) { + rpt.SndAddr = addr +} diff --git a/data/receipt/receipt_test.go b/data/receipt/receipt_test.go new file mode 100644 index 00000000000..be82dd64085 --- /dev/null +++ b/data/receipt/receipt_test.go @@ -0,0 +1 @@ +package receipt diff --git a/data/rewardTx/rewardTx.go b/data/rewardTx/rewardTx.go index 776e98513ea..096d92fd96e 100644 --- a/data/rewardTx/rewardTx.go +++ b/data/rewardTx/rewardTx.go @@ -91,8 +91,8 @@ func (scr *RewardTx) GetNonce() uint64 { } // GetData returns the data of the reward transaction -func (rtx *RewardTx) GetData() string { - return "" +func (rtx *RewardTx) GetData() []byte { + return []byte("") } // GetRecvAddress returns the receiver address from the reward transaction @@ -121,7 +121,7 @@ func (rtx *RewardTx) SetValue(value *big.Int) { } // SetData sets the data of the reward transaction -func (rtx *RewardTx) SetData(data string) { +func (rtx *RewardTx) SetData(data []byte) { } // SetRecvAddress sets the receiver address of the reward transaction diff --git a/data/smartContractResult/smartContractResult.go b/data/smartContractResult/smartContractResult.go index d942007b223..ddfea245ce2 100644 --- a/data/smartContractResult/smartContractResult.go +++ b/data/smartContractResult/smartContractResult.go @@ -8,14 +8,14 @@ import ( capn "github.com/glycerine/go-capnproto" ) -// SmartContractResult holds all the data needed for a value transfer +// SmartContractResult holds all the data needed for results coming from smart contract processing type SmartContractResult struct { Nonce uint64 `capid:"0" json:"nonce"` Value *big.Int `capid:"1" json:"value"` RcvAddr []byte `capid:"2" json:"receiver"` SndAddr []byte `capid:"3" json:"sender"` Code []byte `capid:"4" json:"code,omitempty"` - Data string `capid:"5" json:"data,omitempty"` + Data []byte `capid:"5" json:"data,omitempty"` TxHash []byte `capid:"6" json:"txHash"` GasLimit uint64 `capid:"7" json:"gasLimit"` GasPrice uint64 `capid:"8" json:"gasPrice"` @@ -60,7 +60,7 @@ func SmartContractResultCapnToGo(src capnp.SmartContractResultCapn, dest *SmartC dest.RcvAddr = src.RcvAddr() dest.SndAddr = src.SndAddr() - dest.Data = string(src.Data()) + dest.Data = src.Data() dest.Code = src.Code() dest.TxHash = src.TxHash() @@ -99,7 +99,7 @@ func (scr *SmartContractResult) GetNonce() uint64 { } // GetData returns the data of the smart contract result -func (scr *SmartContractResult) GetData() string { +func (scr *SmartContractResult) GetData() []byte { return scr.Data } @@ -129,7 +129,7 @@ func (scr *SmartContractResult) SetValue(value *big.Int) { } // SetData sets the data of the smart contract result -func (scr *SmartContractResult) SetData(data string) { +func (scr *SmartContractResult) SetData(data []byte) { scr.Data = data } diff --git a/data/smartContractResult/smartContractResult_test.go b/data/smartContractResult/smartContractResult_test.go index 772ae7976b1..6eb916f3bae 100644 --- a/data/smartContractResult/smartContractResult_test.go +++ b/data/smartContractResult/smartContractResult_test.go @@ -15,7 +15,7 @@ func TestSmartContractResult_SaveLoad(t *testing.T) { Value: big.NewInt(1), RcvAddr: []byte("receiver_address"), SndAddr: []byte("sender_address"), - Data: "scr_data", + Data: []byte("scr_data"), Code: 
[]byte("code"), TxHash: []byte("scrHash"), } @@ -32,7 +32,7 @@ func TestSmartContractResult_SaveLoad(t *testing.T) { func TestSmartContractResult_GetData(t *testing.T) { t.Parallel() - data := "data" + data := []byte("data") scr := &smartContractResult.SmartContractResult{Data: data} assert.Equal(t, data, scr.Data) @@ -68,7 +68,7 @@ func TestSmartContractResult_GetValue(t *testing.T) { func TestSmartContractResult_SetData(t *testing.T) { t.Parallel() - data := "data" + data := []byte("data") scr := &smartContractResult.SmartContractResult{} scr.SetData(data) diff --git a/data/state/account.go b/data/state/account.go index 6b2e3b7d3b9..948b4bcaeb9 100644 --- a/data/state/account.go +++ b/data/state/account.go @@ -12,6 +12,7 @@ type Account struct { Balance *big.Int CodeHash []byte RootHash []byte + Address []byte addressContainer AddressContainer code []byte @@ -28,11 +29,14 @@ func NewAccount(addressContainer AddressContainer, tracker AccountTracker) (*Acc return nil, ErrNilAccountTracker } + addressBytes := addressContainer.Bytes() + return &Account{ Balance: big.NewInt(0), addressContainer: addressContainer, + Address: addressBytes, accountTracker: tracker, - dataTrieTracker: NewTrackableDataTrie(nil), + dataTrieTracker: NewTrackableDataTrie(addressBytes, nil), }, nil } diff --git a/data/state/accountsDB.go b/data/state/accountsDB.go index cc02743dc63..5e2f4861d2c 100644 --- a/data/state/accountsDB.go +++ b/data/state/accountsDB.go @@ -1,7 +1,6 @@ package state import ( - "bytes" "errors" "strconv" "sync" @@ -61,7 +60,7 @@ func (adb *AccountsDB) PutCode(accountHandler AccountHandler, code []byte) error if code == nil { return ErrNilCode } - if accountHandler == nil || accountHandler.IsInterfaceNil() { + if check.IfNil(accountHandler) { return ErrNilAccountHandler } @@ -157,22 +156,23 @@ func (adb *AccountsDB) SaveDataTrie(accountHandler AccountHandler) error { oldValues := make(map[string][]byte) for k, v := range trackableDataTrie.DirtyData() { - originalValue := trackableDataTrie.OriginalValue([]byte(k)) + //TODO: delete the next verification when delete from trie bug is repaired + if len(v) == 0 { + continue + } - if !bytes.Equal(v, originalValue) { - flagHasDirtyData = true + flagHasDirtyData = true - val, err := dataTrie.Get([]byte(k)) - if err != nil { - return err - } + val, err := dataTrie.Get([]byte(k)) + if err != nil { + return err + } - oldValues[k] = val + oldValues[k] = val - err = dataTrie.Update([]byte(k), v) - if err != nil { - return err - } + err = dataTrie.Update([]byte(k), v) + if err != nil { + return err } } @@ -374,13 +374,17 @@ func (adb *AccountsDB) Commit() ([]byte, error) { copy(jEntries, adb.entries) adb.mutEntries.RUnlock() + oldHashes := make([][]byte, 0) //Step 1. commit all data tries dataTries := adb.dataTries.GetAll() for i := 0; i < len(dataTries); i++ { + oldTrieHashes := dataTries[i].ResetOldHashes() err := dataTries[i].Commit() if err != nil { return nil, err } + + oldHashes = append(oldHashes, oldTrieHashes...) } adb.dataTries.Reset() @@ -388,6 +392,7 @@ func (adb *AccountsDB) Commit() ([]byte, error) { adb.clearJournal() //Step 3. 
commit main trie + adb.mainTrie.AppendToOldHashes(oldHashes) err := adb.mainTrie.Commit() if err != nil { return nil, err @@ -457,10 +462,32 @@ func (adb *AccountsDB) clearJournal() { adb.mutEntries.Unlock() } +// PruneTrie removes old values from the trie database +func (adb *AccountsDB) PruneTrie(rootHash []byte) error { + return adb.mainTrie.Prune(rootHash, data.OldRoot) +} + +// CancelPrune clears the trie's evictionWaitingList +func (adb *AccountsDB) CancelPrune(rootHash []byte) { + adb.mainTrie.CancelPrune(rootHash, data.NewRoot) +} + +// SnapshotState triggers the snapshotting process of the state trie +func (adb *AccountsDB) SnapshotState(rootHash []byte) { + adb.mainTrie.TakeSnapshot(rootHash) +} + +// SetStateCheckpoint sets a checkpoint for the state trie +func (adb *AccountsDB) SetStateCheckpoint(rootHash []byte) { + adb.mainTrie.SetCheckpoint(rootHash) +} + +// IsPruningEnabled returns true if state pruning is enabled +func (adb *AccountsDB) IsPruningEnabled() bool { + return adb.mainTrie.IsPruningEnabled() +} + // IsInterfaceNil returns true if there is no value under the interface func (adb *AccountsDB) IsInterfaceNil() bool { - if adb == nil { - return true - } - return false + return adb == nil } diff --git a/data/state/accountsDBWrapperForSync.go b/data/state/accountsDBWrapperForSync.go new file mode 100644 index 00000000000..266a4998585 --- /dev/null +++ b/data/state/accountsDBWrapperForSync.go @@ -0,0 +1,33 @@ +package state + +import ( + "github.com/ElrondNetwork/elrond-go/data" +) + +type accountsDbWrapperSync struct { + *AccountsDB +} + +// NewAccountsDbWrapperSync creates a new account manager used when syncing +func NewAccountsDbWrapperSync(adapter AccountsAdapter) (*accountsDbWrapperSync, error) { + if adapter == nil || adapter.IsInterfaceNil() { + return nil, ErrNilAccountsAdapter + } + + accountsDb, ok := adapter.(*AccountsDB) + if !ok { + return nil, ErrWrongTypeAssertion + } + + return &accountsDbWrapperSync{accountsDb}, nil +} + +// PruneTrie removes old values from the trie database +func (adb *accountsDbWrapperSync) PruneTrie(rootHash []byte) error { + return adb.mainTrie.Prune(rootHash, data.NewRoot) +} + +// CancelPrune clears the trie's evictionWaitingList +func (adb *accountsDbWrapperSync) CancelPrune(rootHash []byte) { + adb.mainTrie.CancelPrune(rootHash, data.OldRoot) +} diff --git a/data/state/accountsDB_test.go b/data/state/accountsDB_test.go index 97e3635b00b..1f0648a8cb8 100644 --- a/data/state/accountsDB_test.go +++ b/data/state/accountsDB_test.go @@ -624,10 +624,14 @@ func TestAccountsDB_LoadDataWithSomeValuesShouldWork(t *testing.T) { rootHash[0] = 1 keyRequired := []byte{65, 66, 67} val := []byte{32, 33, 34} + + trieVal := append(val, keyRequired...) + trieVal = append(trieVal, []byte("identifier")...) 
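The AccountsDB changes above expose the trie pruning lifecycle (PruneTrie, CancelPrune, SnapshotState, SetStateCheckpoint, IsPruningEnabled) through the AccountsAdapter interface. A minimal sketch of how a caller might drive these hooks once a block's state has been committed is shown below; the example package, the processBlockState helper, and its arguments are illustrative only and are not part of this change.

package example

import (
	"github.com/ElrondNetwork/elrond-go/data/state"
)

// processBlockState is a hypothetical caller of the new pruning hooks: once the
// state for a block has been committed, the previous root can be pruned and any
// prune scheduled for the newly committed root can be cancelled.
func processBlockState(accounts state.AccountsAdapter, prevRootHash, newRootHash []byte) error {
	if !accounts.IsPruningEnabled() {
		return nil
	}

	// keep the freshly committed root available
	accounts.CancelPrune(newRootHash)

	// the previous root is no longer referenced once the new state is final
	return accounts.PruneTrie(prevRootHash)
}

The accountsDbWrapperSync introduced above swaps the pruning identifiers (NewRoot/OldRoot), presumably because during sync or rollback it is the newly created root, not the old one, that gets discarded.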
+ dataTrie := &mock.TrieStub{ GetCalled: func(key []byte) (i []byte, e error) { if bytes.Equal(key, keyRequired) { - return val, nil + return trieVal, nil } return nil, nil diff --git a/data/state/dataTriesHolder_test.go b/data/state/dataTriesHolder_test.go index eeff879bc57..a568c76ba90 100644 --- a/data/state/dataTriesHolder_test.go +++ b/data/state/dataTriesHolder_test.go @@ -62,7 +62,7 @@ func TestDataTriesHolder_Concurrency(t *testing.T) { t.Parallel() dth := state.NewDataTriesHolder() - numTries := 1000 + numTries := 50 wg := sync.WaitGroup{} wg.Add(numTries) diff --git a/data/state/interface.go b/data/state/interface.go index c0a8fa117ca..ea11a0277bd 100644 --- a/data/state/interface.go +++ b/data/state/interface.go @@ -73,6 +73,10 @@ type PeerAccountHandler interface { DecreaseLeaderSuccessRateWithJournal() error IncreaseValidatorSuccessRateWithJournal() error DecreaseValidatorSuccessRateWithJournal() error + GetRating() uint32 + SetRatingWithJournal(uint322 uint32) error + GetTempRating() uint32 + SetTempRatingWithJournal(uint322 uint32) error } // DataTrieTracker models what how to manipulate data held by a SC account @@ -102,6 +106,11 @@ type AccountsAdapter interface { PutCode(accountHandler AccountHandler, code []byte) error RemoveCode(codeHash []byte) error SaveDataTrie(accountHandler AccountHandler) error + PruneTrie(rootHash []byte) error + CancelPrune(rootHash []byte) + SnapshotState(rootHash []byte) + SetStateCheckpoint(rootHash []byte) + IsPruningEnabled() bool IsInterfaceNil() bool } diff --git a/data/state/metaAccount.go b/data/state/metaAccount.go index 878cbe585d9..1437118a5be 100644 --- a/data/state/metaAccount.go +++ b/data/state/metaAccount.go @@ -24,6 +24,7 @@ type MetaAccount struct { MiniBlocks []*MiniBlockData PubKeyLeader []byte ShardRootHash []byte + Address []byte addressContainer AddressContainer code []byte @@ -40,11 +41,14 @@ func NewMetaAccount(addressContainer AddressContainer, tracker AccountTracker) ( return nil, ErrNilAccountTracker } + addressBytes := addressContainer.Bytes() + return &MetaAccount{ TxCount: big.NewInt(0), addressContainer: addressContainer, + Address: addressBytes, accountTracker: tracker, - dataTrieTracker: NewTrackableDataTrie(nil), + dataTrieTracker: NewTrackableDataTrie(addressBytes, nil), }, nil } diff --git a/data/state/peerAccount.go b/data/state/peerAccount.go index 690d25184ce..dad5e2ec9a0 100644 --- a/data/state/peerAccount.go +++ b/data/state/peerAccount.go @@ -36,7 +36,7 @@ type ValidatorApiResponse struct { type PeerAccount struct { BLSPublicKey []byte SchnorrPublicKey []byte - Address []byte + RewardAddress []byte Stake *big.Int JailTime TimePeriod @@ -52,9 +52,10 @@ type PeerAccount struct { CodeHash []byte - Rating uint32 - RootHash []byte - Nonce uint64 + Rating uint32 + TempRating uint32 + RootHash []byte + Nonce uint64 addressContainer AddressContainer code []byte @@ -78,7 +79,7 @@ func NewPeerAccount( Stake: big.NewInt(0), addressContainer: addressContainer, accountTracker: tracker, - dataTrieTracker: NewTrackableDataTrie(nil), + dataTrieTracker: NewTrackableDataTrie(addressContainer.Bytes(), nil), }, nil } @@ -176,19 +177,19 @@ func (a *PeerAccount) DataTrieTracker() DataTrieTracker { return a.dataTrieTracker } -// SetAddressWithJournal sets the account's address, saving the old address before changing -func (a *PeerAccount) SetAddressWithJournal(address []byte) error { +// SetRewardAddressWithJournal sets the account's reward address, saving the old address before changing +func (a *PeerAccount) 
SetRewardAddressWithJournal(address []byte) error { if len(address) < 1 { return ErrEmptyAddress } - entry, err := NewPeerJournalEntryAddress(a, a.Address) + entry, err := NewPeerJournalEntryAddress(a, a.RewardAddress) if err != nil { return err } a.accountTracker.Journalize(entry) - a.Address = address + a.RewardAddress = address return a.accountTracker.SaveAccount(a) } @@ -365,6 +366,11 @@ func (a *PeerAccount) DecreaseLeaderSuccessRateWithJournal() error { return a.accountTracker.SaveAccount(a) } +// GetRating gets the rating +func (a *PeerAccount) GetRating() uint32 { + return a.Rating +} + // SetRatingWithJournal sets the account's rating id, saving the old state before changing func (a *PeerAccount) SetRatingWithJournal(rating uint32) error { entry, err := NewPeerJournalEntryRating(a, a.Rating) @@ -377,3 +383,21 @@ func (a *PeerAccount) SetRatingWithJournal(rating uint32) error { return a.accountTracker.SaveAccount(a) } + +// GetTempRating gets the rating +func (a *PeerAccount) GetTempRating() uint32 { + return a.TempRating +} + +// SetTempRatingWithJournal sets the account's tempRating, saving the old state before changing +func (a *PeerAccount) SetTempRatingWithJournal(rating uint32) error { + entry, err := NewPeerJournalEntryTempRating(a, a.TempRating) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.TempRating = rating + + return a.accountTracker.SaveAccount(a) +} diff --git a/data/state/peerAccount_test.go b/data/state/peerAccount_test.go index ac09860b074..4f1e77739e0 100644 --- a/data/state/peerAccount_test.go +++ b/data/state/peerAccount_test.go @@ -216,11 +216,11 @@ func TestPeerAccount_SetAddressWithJournal(t *testing.T) { assert.Nil(t, err) address := []byte("address") - err = acc.SetAddressWithJournal(address) + err = acc.SetRewardAddressWithJournal(address) assert.NotNil(t, acc) assert.Nil(t, err) - assert.Equal(t, address, acc.Address) + assert.Equal(t, address, acc.RewardAddress) assert.Equal(t, 1, journalizeCalled) assert.Equal(t, 1, saveAccountCalled) } diff --git a/data/state/peerJournalEntries.go b/data/state/peerJournalEntries.go index d1e96b70801..e8cb080b04a 100644 --- a/data/state/peerJournalEntries.go +++ b/data/state/peerJournalEntries.go @@ -24,7 +24,7 @@ func NewPeerJournalEntryAddress(account *PeerAccount, oldAddress []byte) (*PeerJ // Revert applies undo operation func (pje *PeerJournalEntryAddress) Revert() (AccountHandler, error) { - pje.account.Address = pje.oldAddress + pje.account.RewardAddress = pje.oldAddress return pje.account, nil } @@ -385,6 +385,36 @@ func (pjer *PeerJournalEntryRating) IsInterfaceNil() bool { return false } +// PeerJournalEntryTempRating is used to revert a rating change +type PeerJournalEntryTempRating struct { + account *PeerAccount + oldTempRating uint32 +} + +// NewPeerJournalEntryRating outputs a new PeerJournalEntryRating implementation used to revert a state change +func NewPeerJournalEntryTempRating(account *PeerAccount, oldTempRating uint32) (*PeerJournalEntryTempRating, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryTempRating{ + account: account, + oldTempRating: oldTempRating, + }, nil +} + +// Revert applies undo operation +func (pjer *PeerJournalEntryTempRating) Revert() (AccountHandler, error) { + pjer.account.TempRating = pjer.oldTempRating + + return pjer.account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pjer *PeerJournalEntryTempRating) IsInterfaceNil() bool { + return pjer == nil +} 
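SetTempRatingWithJournal above follows the same journalize-then-mutate pattern as the other peer account setters: the old value is captured in a PeerJournalEntryTempRating before the field is changed, so the entry's Revert restores it. A small self-contained sketch of that revert semantics follows; the example package and the revertTempRating helper are illustrative and not part of the diff.

package example

import (
	"fmt"

	"github.com/ElrondNetwork/elrond-go/data/state"
)

// revertTempRating shows how a PeerJournalEntryTempRating undoes a temp rating change.
func revertTempRating() error {
	account := &state.PeerAccount{TempRating: 10}

	// capture the current value before mutating it
	entry, err := state.NewPeerJournalEntryTempRating(account, account.TempRating)
	if err != nil {
		return err
	}

	account.TempRating = 42 // the state change that may need to be rolled back

	// undo: TempRating is back to 10
	if _, err = entry.Revert(); err != nil {
		return err
	}

	fmt.Println(account.TempRating) // prints 10
	return nil
}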
+ // PeerJournalEntryUnStakedNonce is used to revert a unstaked nonce change type PeerJournalEntryUnStakedNonce struct { account *PeerAccount diff --git a/data/state/peerJournalEntries_test.go b/data/state/peerJournalEntries_test.go index 06f5edf097d..1fc87b03e87 100644 --- a/data/state/peerJournalEntries_test.go +++ b/data/state/peerJournalEntries_test.go @@ -36,7 +36,7 @@ func TestPeerJournalEntryAddress_RevertOkValsShouldWork(t *testing.T) { _, err := entry.Revert() assert.Nil(t, err) - assert.Equal(t, shardRootHash, accnt.Address) + assert.Equal(t, shardRootHash, accnt.RewardAddress) } func TestPeerJournalEntrySchnorrPublicKey_NilAccountShouldErr(t *testing.T) { diff --git a/data/state/trackableDataTrie.go b/data/state/trackableDataTrie.go index 2791987b2f1..88c7eba65cf 100644 --- a/data/state/trackableDataTrie.go +++ b/data/state/trackableDataTrie.go @@ -9,14 +9,16 @@ type TrackableDataTrie struct { originalData map[string][]byte dirtyData map[string][]byte tr data.Trie + identifier []byte } // NewTrackableDataTrie returns an instance of DataTrieTracker -func NewTrackableDataTrie(tr data.Trie) *TrackableDataTrie { +func NewTrackableDataTrie(identifier []byte, tr data.Trie) *TrackableDataTrie { return &TrackableDataTrie{ tr: tr, originalData: make(map[string][]byte), dirtyData: make(map[string][]byte), + identifier: identifier, } } @@ -41,37 +43,50 @@ func (tdaw *TrackableDataTrie) OriginalValue(key []byte) []byte { // Data must have been retrieved from its trie func (tdaw *TrackableDataTrie) RetrieveValue(key []byte) ([]byte, error) { strKey := string(key) + tailLength := len(key) + len(tdaw.identifier) //search in dirty data cache - data, found := tdaw.dirtyData[strKey] + value, found := tdaw.dirtyData[strKey] if found { - return data, nil + return trimValue(value, tailLength) } //search in original data cache - data, found = tdaw.originalData[strKey] + value, found = tdaw.originalData[strKey] if found { - return data, nil + return trimValue(value, tailLength) } //ok, not in cache, retrieve from trie if tdaw.tr == nil { return nil, ErrNilTrie } - data, err := tdaw.tr.Get(key) + value, err := tdaw.tr.Get(key) if err != nil { return nil, err } + value, _ = trimValue(value, tailLength) + //got the value, put it originalData cache as the next fetch will run faster - tdaw.originalData[string(key)] = data - return data, nil + tdaw.originalData[string(key)] = value + return value, nil +} + +func trimValue(value []byte, tailLength int) ([]byte, error) { + dataLength := len(value) - tailLength + if dataLength < 0 { + return nil, ErrNegativeValue + } + + return value[:dataLength], nil } // SaveKeyValue stores in dirtyData the data keys "touched" // It does not care if the data is really dirty as calling this check here will be sub-optimal func (tdaw *TrackableDataTrie) SaveKeyValue(key []byte, value []byte) { - tdaw.dirtyData[string(key)] = value + identifier := append(key, tdaw.identifier...) + tdaw.dirtyData[string(key)] = append(value, identifier...) 
} // SetDataTrie sets the internal data trie diff --git a/data/state/trackableDataTrie_test.go b/data/state/trackableDataTrie_test.go index 5a4170e6050..e63ccd21ef6 100644 --- a/data/state/trackableDataTrie_test.go +++ b/data/state/trackableDataTrie_test.go @@ -13,68 +13,90 @@ import ( func TestTrackableDataAccountRetrieveValueNilDataTrieShouldErr(t *testing.T) { t.Parallel() - as := state.NewTrackableDataTrie(nil) + as := state.NewTrackableDataTrie([]byte("identifier"), nil) assert.NotNil(t, as) - _, err := as.RetrieveValue([]byte{65, 66, 67}) + _, err := as.RetrieveValue([]byte("ABC")) assert.NotNil(t, err) } func TestTrackableDataAccountRetrieveValueFoundInDirtyShouldWork(t *testing.T) { t.Parallel() + stringKey := "ABC" + identifier := []byte("identifier") trie := &mock.TrieStub{} - tdaw := state.NewTrackableDataTrie(trie) + tdaw := state.NewTrackableDataTrie(identifier, trie) assert.NotNil(t, tdaw) tdaw.SetDataTrie(&mock.TrieStub{}) - tdaw.DirtyData()["ABC"] = []byte{32, 33, 34} + key := []byte(stringKey) + val := []byte("123") - val, err := tdaw.RetrieveValue([]byte{65, 66, 67}) + trieVal := append(val, key...) + trieVal = append(trieVal, identifier...) + + tdaw.DirtyData()[stringKey] = trieVal + + retrievedVal, err := tdaw.RetrieveValue(key) assert.Nil(t, err) - assert.Equal(t, []byte{32, 33, 34}, val) + assert.Equal(t, val, retrievedVal) } func TestTrackableDataAccountRetrieveValueFoundInOriginalShouldWork(t *testing.T) { t.Parallel() + originalKeyString := "ABD" + identifier := []byte("identifier") trie := &mock.TrieStub{} - mdaw := state.NewTrackableDataTrie(trie) + mdaw := state.NewTrackableDataTrie(identifier, trie) assert.NotNil(t, mdaw) + originalKey := []byte(originalKeyString) + dirtyVal := []byte("123") + + expectedVal := []byte("456") + originalVal := append(expectedVal, originalKey...) + originalVal = append(originalVal, identifier...) + mdaw.SetDataTrie(&mock.TrieStub{}) - mdaw.DirtyData()["ABC"] = []byte{32, 33, 34} - mdaw.OriginalData()["ABD"] = []byte{35, 36, 37} + mdaw.DirtyData()["ABC"] = dirtyVal + mdaw.OriginalData()[originalKeyString] = originalVal - val, err := mdaw.RetrieveValue([]byte{65, 66, 68}) + val, err := mdaw.RetrieveValue(originalKey) assert.Nil(t, err) - assert.Equal(t, []byte{35, 36, 37}, val) + assert.Equal(t, expectedVal, val) } func TestTrackableDataAccountRetrieveValueFoundInTrieShouldWork(t *testing.T) { t.Parallel() - keyExpected := []byte("key") - value := []byte("value") + identifier := []byte("identifier") + expectedKey := []byte("key") + + expectedVal := []byte("value") + value := append(expectedVal, expectedKey...) + value = append(value, identifier...) 
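The TrackableDataTrie changes above store each account data leaf as value || key || identifier and strip a tail of len(key)+len(identifier) bytes on reads (trimValue), which is why the tests here build expected values with the key and the identifier appended. A tiny sketch of that layout, assuming hypothetical buildLeaf/trimLeaf helpers that only mirror the arithmetic from the diff:

package main

import (
	"bytes"
	"fmt"
)

// buildLeaf mirrors SaveKeyValue: the stored value is value || key || identifier.
func buildLeaf(value, key, identifier []byte) []byte {
	leaf := append([]byte{}, value...)
	leaf = append(leaf, key...)
	return append(leaf, identifier...)
}

// trimLeaf mirrors trimValue: drop the last len(key)+len(identifier) bytes.
func trimLeaf(leaf, key, identifier []byte) ([]byte, bool) {
	tail := len(key) + len(identifier)
	if len(leaf) < tail {
		return nil, false
	}
	return leaf[:len(leaf)-tail], true
}

func main() {
	key, id, value := []byte("ABC"), []byte("identifier"), []byte("123")

	leaf := buildLeaf(value, key, id) // "123ABCidentifier"
	got, ok := trimLeaf(leaf, key, id)

	fmt.Println(ok, bytes.Equal(got, value)) // true true
}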
+ trie := &mock.TrieStub{ UpdateCalled: func(key, value []byte) error { return nil }, GetCalled: func(key []byte) (b []byte, e error) { - if bytes.Equal(key, keyExpected) { + if bytes.Equal(key, expectedKey) { return value, nil } return nil, nil }, } - mdaw := state.NewTrackableDataTrie(trie) + mdaw := state.NewTrackableDataTrie(identifier, trie) assert.NotNil(t, mdaw) - mdaw.DirtyData()[string(keyExpected)] = value + mdaw.DirtyData()[string(expectedKey)] = value - valRecovered, err := mdaw.RetrieveValue(keyExpected) + valRecovered, err := mdaw.RetrieveValue(expectedKey) assert.Nil(t, err) - assert.Equal(t, valRecovered, value) + assert.Equal(t, expectedVal, valRecovered) } func TestTrackableDataAccountRetrieveValueMalfunctionTrieShouldErr(t *testing.T) { @@ -90,7 +112,7 @@ func TestTrackableDataAccountRetrieveValueMalfunctionTrieShouldErr(t *testing.T) return nil, errExpected }, } - mdaw := state.NewTrackableDataTrie(trie) + mdaw := state.NewTrackableDataTrie([]byte("identifier"), trie) assert.NotNil(t, mdaw) valRecovered, err := mdaw.RetrieveValue(keyExpected) @@ -101,8 +123,13 @@ func TestTrackableDataAccountRetrieveValueMalfunctionTrieShouldErr(t *testing.T) func TestTrackableDataAccountSaveKeyValueShouldSaveOnlyInDirty(t *testing.T) { t.Parallel() + identifier := []byte("identifier") keyExpected := []byte("key") value := []byte("value") + + expectedVal := append(value, keyExpected...) + expectedVal = append(expectedVal, identifier...) + trie := &mock.TrieStub{ UpdateCalled: func(key, value []byte) error { return nil @@ -112,13 +139,13 @@ func TestTrackableDataAccountSaveKeyValueShouldSaveOnlyInDirty(t *testing.T) { return nil, nil }, } - mdaw := state.NewTrackableDataTrie(trie) + mdaw := state.NewTrackableDataTrie(identifier, trie) assert.NotNil(t, mdaw) mdaw.SaveKeyValue(keyExpected, value) //test in dirty - assert.Equal(t, value, mdaw.DirtyData()[string(keyExpected)]) + assert.Equal(t, expectedVal, mdaw.DirtyData()[string(keyExpected)]) //test in original assert.Nil(t, mdaw.OriginalData()[string(keyExpected)]) } @@ -127,7 +154,7 @@ func TestTrackableDataAccountClearDataCachesValidDataShouldWork(t *testing.T) { t.Parallel() trie := &mock.TrieStub{} - mdaw := state.NewTrackableDataTrie(trie) + mdaw := state.NewTrackableDataTrie([]byte("identifier"), trie) assert.NotNil(t, mdaw) mdaw.SetDataTrie(&mock.TrieStub{}) @@ -135,7 +162,7 @@ func TestTrackableDataAccountClearDataCachesValidDataShouldWork(t *testing.T) { assert.Equal(t, 0, len(mdaw.DirtyData())) //add something - mdaw.SaveKeyValue([]byte{65, 66, 67}, []byte{32, 33, 34}) + mdaw.SaveKeyValue([]byte("ABC"), []byte("123")) assert.Equal(t, 1, len(mdaw.DirtyData())) //clear diff --git a/data/transaction/capnp/schema.capnp b/data/transaction/capnp/schema.capnp index 60cbf5201bc..387fb845fbf 100644 --- a/data/transaction/capnp/schema.capnp +++ b/data/transaction/capnp/schema.capnp @@ -11,9 +11,8 @@ struct TransactionCapn { sndAddr @3: Data; gasPrice @4: UInt64; gasLimit @5: UInt64; - data @6: Text; + data @6: Data; signature @7: Data; - challenge @8: Data; } ##compile with: diff --git a/data/transaction/capnp/schema.capnp.go b/data/transaction/capnp/schema.capnp.go index 1ef870c3082..22ff40d47c6 100644 --- a/data/transaction/capnp/schema.capnp.go +++ b/data/transaction/capnp/schema.capnp.go @@ -12,12 +12,12 @@ import ( type TransactionCapn C.Struct -func NewTransactionCapn(s *C.Segment) TransactionCapn { return TransactionCapn(s.NewStruct(24, 6)) } +func NewTransactionCapn(s *C.Segment) TransactionCapn { return 
TransactionCapn(s.NewStruct(24, 5)) } func NewRootTransactionCapn(s *C.Segment) TransactionCapn { - return TransactionCapn(s.NewRootStruct(24, 6)) + return TransactionCapn(s.NewRootStruct(24, 5)) } func AutoNewTransactionCapn(s *C.Segment) TransactionCapn { - return TransactionCapn(s.NewStructAR(24, 6)) + return TransactionCapn(s.NewStructAR(24, 5)) } func ReadRootTransactionCapn(s *C.Segment) TransactionCapn { return TransactionCapn(s.Root(0).ToStruct()) @@ -34,13 +34,10 @@ func (s TransactionCapn) GasPrice() uint64 { return C.Struct(s).Get64(8) } func (s TransactionCapn) SetGasPrice(v uint64) { C.Struct(s).Set64(8, v) } func (s TransactionCapn) GasLimit() uint64 { return C.Struct(s).Get64(16) } func (s TransactionCapn) SetGasLimit(v uint64) { C.Struct(s).Set64(16, v) } -func (s TransactionCapn) Data() string { return C.Struct(s).GetObject(3).ToText() } -func (s TransactionCapn) DataBytes() []byte { return C.Struct(s).GetObject(3).ToDataTrimLastByte() } -func (s TransactionCapn) SetData(v string) { C.Struct(s).SetObject(3, s.Segment.NewText(v)) } +func (s TransactionCapn) Data() []byte { return C.Struct(s).GetObject(3).ToData() } +func (s TransactionCapn) SetData(v []byte) { C.Struct(s).SetObject(3, s.Segment.NewData(v)) } func (s TransactionCapn) Signature() []byte { return C.Struct(s).GetObject(4).ToData() } func (s TransactionCapn) SetSignature(v []byte) { C.Struct(s).SetObject(4, s.Segment.NewData(v)) } -func (s TransactionCapn) Challenge() []byte { return C.Struct(s).GetObject(5).ToData() } -func (s TransactionCapn) SetChallenge(v []byte) { C.Struct(s).SetObject(5, s.Segment.NewData(v)) } func (s TransactionCapn) WriteJSON(w io.Writer) error { b := bufio.NewWriter(w) var err error @@ -198,25 +195,6 @@ func (s TransactionCapn) WriteJSON(w io.Writer) error { return err } } - err = b.WriteByte(',') - if err != nil { - return err - } - _, err = b.WriteString("\"challenge\":") - if err != nil { - return err - } - { - s := s.Challenge() - buf, err = json.Marshal(s) - if err != nil { - return err - } - _, err = b.Write(buf) - if err != nil { - return err - } - } err = b.WriteByte('}') if err != nil { return err @@ -386,25 +364,6 @@ func (s TransactionCapn) WriteCapLit(w io.Writer) error { return err } } - _, err = b.WriteString(", ") - if err != nil { - return err - } - _, err = b.WriteString("challenge = ") - if err != nil { - return err - } - { - s := s.Challenge() - buf, err = json.Marshal(s) - if err != nil { - return err - } - _, err = b.Write(buf) - if err != nil { - return err - } - } err = b.WriteByte(')') if err != nil { return err @@ -421,7 +380,7 @@ func (s TransactionCapn) MarshalCapLit() ([]byte, error) { type TransactionCapn_List C.PointerList func NewTransactionCapnList(s *C.Segment, sz int) TransactionCapn_List { - return TransactionCapn_List(s.NewCompositeList(24, 6, sz)) + return TransactionCapn_List(s.NewCompositeList(24, 5, sz)) } func (s TransactionCapn_List) Len() int { return C.PointerList(s).Len() } func (s TransactionCapn_List) At(i int) TransactionCapn { diff --git a/data/transaction/transaction.go b/data/transaction/transaction.go index f73ea65bf7d..0719549b2b8 100644 --- a/data/transaction/transaction.go +++ b/data/transaction/transaction.go @@ -18,9 +18,8 @@ type Transaction struct { SndAddr []byte `capid:"3" json:"sender"` GasPrice uint64 `capid:"4" json:"gasPrice,omitempty"` GasLimit uint64 `capid:"5" json:"gasLimit,omitempty"` - Data string `capid:"6" json:"data,omitempty"` + Data []byte `capid:"6" json:"data,omitempty"` Signature []byte `capid:"7" 
json:"signature,omitempty"` - Challenge []byte `capid:"8" json:"challenge,omitempty"` } // Save saves the serialized data of a Transaction into a stream through Capnp protocol @@ -73,8 +72,6 @@ func TransactionCapnToGo(src capnp.TransactionCapn, dest *Transaction) *Transact dest.Data = src.Data() // Signature dest.Signature = src.Signature() - // Challenge - dest.Challenge = src.Challenge() return dest } @@ -92,7 +89,6 @@ func TransactionGoToCapn(seg *capn.Segment, src *Transaction) capnp.TransactionC dest.SetGasLimit(src.GasLimit) dest.SetData(src.Data) dest.SetSignature(src.Signature) - dest.SetChallenge(src.Challenge) return dest } @@ -113,7 +109,7 @@ func (tx *Transaction) GetNonce() uint64 { } // GetData returns the data of the transaction -func (tx *Transaction) GetData() string { +func (tx *Transaction) GetData() []byte { return tx.Data } @@ -143,7 +139,7 @@ func (tx *Transaction) SetValue(value *big.Int) { } // SetData sets the data of the transaction -func (tx *Transaction) SetData(data string) { +func (tx *Transaction) SetData(data []byte) { tx.Data = data } @@ -171,7 +167,7 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) { SndAddr []byte `json:"sender"` GasPrice uint64 `json:"gasPrice,omitempty"` GasLimit uint64 `json:"gasLimit,omitempty"` - Data string `json:"data,omitempty"` + Data []byte `json:"data,omitempty"` Signature []byte `json:"signature,omitempty"` }{ Nonce: tx.Nonce, @@ -194,7 +190,7 @@ func (tx *Transaction) UnmarshalJSON(dataBuff []byte) error { SndAddr []byte `json:"sender"` GasPrice uint64 `json:"gasPrice,omitempty"` GasLimit uint64 `json:"gasLimit,omitempty"` - Data string `json:"data,omitempty"` + Data []byte `json:"data,omitempty"` Signature []byte `json:"signature,omitempty"` }{} if err := json.Unmarshal(dataBuff, &aux); err != nil { @@ -226,3 +222,13 @@ func TrimSlicePtr(in []*Transaction) []*Transaction { copy(ret, in) return ret } + +// TrimSliceHandler creates a copy of the provided slice without the excess capacity +func TrimSliceHandler(in []data.TransactionHandler) []data.TransactionHandler { + if len(in) == 0 { + return []data.TransactionHandler{} + } + ret := make([]data.TransactionHandler, len(in)) + copy(ret, in) + return ret +} diff --git a/data/transaction/transaction_test.go b/data/transaction/transaction_test.go index a3204c7ae65..69dab91f7ea 100644 --- a/data/transaction/transaction_test.go +++ b/data/transaction/transaction_test.go @@ -18,9 +18,8 @@ func TestTransaction_SaveLoad(t *testing.T) { SndAddr: []byte("sender_address"), GasPrice: uint64(10000), GasLimit: uint64(1000), - Data: "tx_data", + Data: []byte("tx_data"), Signature: []byte("signature"), - Challenge: []byte("challenge"), } var b bytes.Buffer @@ -35,7 +34,7 @@ func TestTransaction_SaveLoad(t *testing.T) { func TestTransaction_GetData(t *testing.T) { t.Parallel() - data := "data" + data := []byte("data") tx := &transaction.Transaction{Data: data} assert.Equal(t, data, tx.Data) @@ -71,7 +70,7 @@ func TestTransaction_GetValue(t *testing.T) { func TestTransaction_SetData(t *testing.T) { t.Parallel() - data := "data" + data := []byte("data") tx := &transaction.Transaction{} tx.SetData(data) @@ -119,7 +118,7 @@ func TestTransaction_MarshalUnmarshalJsonShouldWork(t *testing.T) { SndAddr: []byte("sender"), GasPrice: 1234, GasLimit: 5678, - Data: "data", + Data: []byte("data"), Signature: []byte("signature"), } diff --git a/data/trie/branchNode.go b/data/trie/branchNode.go index 2fed1e1a127..95d77a3a962 100644 --- a/data/trie/branchNode.go +++ b/data/trie/branchNode.go @@ -47,8 
+47,9 @@ func branchNodeGoToCapn(seg *capn.Segment, src *branchNode) capnp.BranchNodeCapn func branchNodeCapnToGo(src capnp.BranchNodeCapn, dest *branchNode) *branchNode { if dest == nil { - dest = newBranchNode() + dest = emptyDirtyBranchNode() } + dest.dirty = false for i := 0; i < nrOfChildren; i++ { child := src.EncodedChildren().At(i) @@ -57,22 +58,47 @@ func branchNodeCapnToGo(src capnp.BranchNodeCapn, dest *branchNode) *branchNode } else { dest.EncodedChildren[i] = child } - } + return dest } -func newBranchNode() *branchNode { +func newBranchNode(marshalizer marshal.Marshalizer, hasher hashing.Hasher) (*branchNode, error) { + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, ErrNilMarshalizer + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, ErrNilHasher + } + + var children [nrOfChildren]node + encChildren := make([][]byte, nrOfChildren) + + return &branchNode{ + CollapsedBn: protobuf.CollapsedBn{ + EncodedChildren: encChildren, + }, + children: children, + baseNode: &baseNode{ + dirty: true, + marsh: marshalizer, + hasher: hasher, + }, + }, nil +} + +func emptyDirtyBranchNode() *branchNode { var children [nrOfChildren]node - EncChildren := make([][]byte, nrOfChildren) + encChildren := make([][]byte, nrOfChildren) return &branchNode{ CollapsedBn: protobuf.CollapsedBn{ - EncodedChildren: EncChildren, + EncodedChildren: encChildren, }, children: children, - hash: nil, - dirty: true, + baseNode: &baseNode{ + dirty: true, + }, } } @@ -80,11 +106,31 @@ func (bn *branchNode) getHash() []byte { return bn.hash } +func (bn *branchNode) setGivenHash(hash []byte) { + bn.hash = hash +} + func (bn *branchNode) isDirty() bool { return bn.dirty } -func (bn *branchNode) getCollapsed(marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) { +func (bn *branchNode) getMarshalizer() marshal.Marshalizer { + return bn.marsh +} + +func (bn *branchNode) setMarshalizer(marshalizer marshal.Marshalizer) { + bn.marsh = marshalizer +} + +func (bn *branchNode) getHasher() hashing.Hasher { + return bn.hasher +} + +func (bn *branchNode) setHasher(hasher hashing.Hasher) { + bn.hasher = hasher +} + +func (bn *branchNode) getCollapsed() (node, error) { err := bn.isEmptyOrNil() if err != nil { return nil, err @@ -95,12 +141,13 @@ func (bn *branchNode) getCollapsed(marshalizer marshal.Marshalizer, hasher hashi collapsed := bn.clone() for i := range bn.children { if bn.children[i] != nil { - ok, err := hasValidHash(bn.children[i]) + var ok bool + ok, err = hasValidHash(bn.children[i]) if err != nil { return nil, err } if !ok { - err := bn.children[i].setHash(marshalizer, hasher) + err = bn.children[i].setHash() if err != nil { return nil, err } @@ -112,7 +159,7 @@ func (bn *branchNode) getCollapsed(marshalizer marshal.Marshalizer, hasher hashi return collapsed, nil } -func (bn *branchNode) setHash(marshalizer marshal.Marshalizer, hasher hashing.Hasher) error { +func (bn *branchNode) setHash() error { err := bn.isEmptyOrNil() if err != nil { return err @@ -121,14 +168,15 @@ func (bn *branchNode) setHash(marshalizer marshal.Marshalizer, hasher hashing.Ha return nil } if bn.isCollapsed() { - hash, err := encodeNodeAndGetHash(bn, marshalizer, hasher) + var hash []byte + hash, err = encodeNodeAndGetHash(bn) if err != nil { return err } bn.hash = hash return nil } - hash, err := hashChildrenAndNode(bn, marshalizer, hasher) + hash, err := hashChildrenAndNode(bn) if err != nil { return err } @@ -136,7 +184,7 @@ func (bn *branchNode) setHash(marshalizer marshal.Marshalizer, 
hasher hashing.Ha return nil } -func (bn *branchNode) setRootHash(marshalizer marshal.Marshalizer, hasher hashing.Hasher) error { +func (bn *branchNode) setRootHash() error { err := bn.isEmptyOrNil() if err != nil { return err @@ -145,7 +193,8 @@ func (bn *branchNode) setRootHash(marshalizer marshal.Marshalizer, hasher hashin return nil } if bn.isCollapsed() { - hash, err := encodeNodeAndGetHash(bn, marshalizer, hasher) + var hash []byte + hash, err = encodeNodeAndGetHash(bn) if err != nil { return err } @@ -159,17 +208,17 @@ func (bn *branchNode) setRootHash(marshalizer marshal.Marshalizer, hasher hashin for i := 0; i < nrOfChildren; i++ { if bn.children[i] != nil { wg.Add(1) - go bn.children[i].setHashConcurrent(marshalizer, hasher, &wg, errc) + go bn.children[i].setHashConcurrent(&wg, errc) } } wg.Wait() if len(errc) != 0 { - for err := range errc { + for err = range errc { return err } } - hashed, err := bn.hashNode(marshalizer, hasher) + hashed, err := bn.hashNode() if err != nil { return err } @@ -178,7 +227,7 @@ func (bn *branchNode) setRootHash(marshalizer marshal.Marshalizer, hasher hashin return nil } -func (bn *branchNode) setHashConcurrent(marshalizer marshal.Marshalizer, hasher hashing.Hasher, wg *sync.WaitGroup, c chan error) { +func (bn *branchNode) setHashConcurrent(wg *sync.WaitGroup, c chan error) { defer wg.Done() err := bn.isEmptyOrNil() if err != nil { @@ -189,7 +238,8 @@ func (bn *branchNode) setHashConcurrent(marshalizer marshal.Marshalizer, hasher return } if bn.isCollapsed() { - hash, err := encodeNodeAndGetHash(bn, marshalizer, hasher) + var hash []byte + hash, err = encodeNodeAndGetHash(bn) if err != nil { c <- err return @@ -197,7 +247,7 @@ func (bn *branchNode) setHashConcurrent(marshalizer marshal.Marshalizer, hasher bn.hash = hash return } - hash, err := hashChildrenAndNode(bn, marshalizer, hasher) + hash, err := hashChildrenAndNode(bn) if err != nil { c <- err return @@ -206,14 +256,14 @@ func (bn *branchNode) setHashConcurrent(marshalizer marshal.Marshalizer, hasher return } -func (bn *branchNode) hashChildren(marshalizer marshal.Marshalizer, hasher hashing.Hasher) error { +func (bn *branchNode) hashChildren() error { err := bn.isEmptyOrNil() if err != nil { return err } for i := 0; i < nrOfChildren; i++ { if bn.children[i] != nil { - err := bn.children[i].setHash(marshalizer, hasher) + err = bn.children[i].setHash() if err != nil { return err } @@ -222,47 +272,61 @@ func (bn *branchNode) hashChildren(marshalizer marshal.Marshalizer, hasher hashi return nil } -func (bn *branchNode) hashNode(marshalizer marshal.Marshalizer, hasher hashing.Hasher) ([]byte, error) { +func (bn *branchNode) hashNode() ([]byte, error) { err := bn.isEmptyOrNil() if err != nil { return nil, err } for i := range bn.EncodedChildren { if bn.children[i] != nil { - encChild, err := encodeNodeAndGetHash(bn.children[i], marshalizer, hasher) + var encChild []byte + encChild, err = encodeNodeAndGetHash(bn.children[i]) if err != nil { return nil, err } bn.EncodedChildren[i] = encChild } } - return encodeNodeAndGetHash(bn, marshalizer, hasher) + return encodeNodeAndGetHash(bn) } -func (bn *branchNode) commit(level byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer, hasher hashing.Hasher) error { +func (bn *branchNode) commit(force bool, level byte, originDb data.DBWriteCacher, targetDb data.DBWriteCacher) error { level++ err := bn.isEmptyOrNil() if err != nil { return err } - if !bn.dirty { + + shouldNotCommit := !bn.dirty && !force + if shouldNotCommit { return nil } + for i := 
range bn.children { - if bn.children[i] != nil { - err := bn.children[i].commit(level, db, marshalizer, hasher) + if force { + err = resolveIfCollapsed(bn, byte(i), originDb) if err != nil { return err } } + + if bn.children[i] == nil { + continue + } + + err = bn.children[i].commit(force, level, originDb, targetDb) + if err != nil { + return err + } } bn.dirty = false - err = encodeNodeAndCommitToDB(bn, db, marshalizer, hasher) + err = encodeNodeAndCommitToDB(bn, targetDb) if err != nil { return err } if level == maxTrieLevelAfterCommit { - collapsed, err := bn.getCollapsed(marshalizer, hasher) + var collapsed node + collapsed, err = bn.getCollapsed() if err != nil { return err } @@ -273,12 +337,12 @@ func (bn *branchNode) commit(level byte, db data.DBWriteCacher, marshalizer mars return nil } -func (bn *branchNode) getEncodedNode(marshalizer marshal.Marshalizer) ([]byte, error) { +func (bn *branchNode) getEncodedNode() ([]byte, error) { err := bn.isEmptyOrNil() if err != nil { return nil, err } - marshaledNode, err := marshalizer.Marshal(bn) + marshaledNode, err := bn.marsh.Marshal(bn) if err != nil { return nil, err } @@ -286,7 +350,7 @@ func (bn *branchNode) getEncodedNode(marshalizer marshal.Marshalizer) ([]byte, e return marshaledNode, nil } -func (bn *branchNode) resolveCollapsed(pos byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer) error { +func (bn *branchNode) resolveCollapsed(pos byte, db data.DBWriteCacher) error { err := bn.isEmptyOrNil() if err != nil { return err @@ -295,10 +359,12 @@ func (bn *branchNode) resolveCollapsed(pos byte, db data.DBWriteCacher, marshali return ErrChildPosOutOfRange } if len(bn.EncodedChildren[pos]) != 0 { - child, err := getNodeFromDBAndDecode(bn.EncodedChildren[pos], db, marshalizer) + var child node + child, err = getNodeFromDBAndDecode(bn.EncodedChildren[pos], db, bn.marsh, bn.hasher) if err != nil { return err } + child.setGivenHash(bn.EncodedChildren[pos]) bn.children[pos] = child } return nil @@ -317,7 +383,7 @@ func (bn *branchNode) isPosCollapsed(pos int) bool { return bn.children[pos] == nil && len(bn.EncodedChildren[pos]) != 0 } -func (bn *branchNode) tryGet(key []byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer) (value []byte, err error) { +func (bn *branchNode) tryGet(key []byte, db data.DBWriteCacher) (value []byte, err error) { err = bn.isEmptyOrNil() if err != nil { return nil, err @@ -330,7 +396,7 @@ func (bn *branchNode) tryGet(key []byte, db data.DBWriteCacher, marshalizer mars return nil, ErrChildPosOutOfRange } key = key[1:] - err = resolveIfCollapsed(bn, childPos, db, marshalizer) + err = resolveIfCollapsed(bn, childPos, db) if err != nil { return nil, err } @@ -338,10 +404,10 @@ func (bn *branchNode) tryGet(key []byte, db data.DBWriteCacher, marshalizer mars return nil, nil } - return bn.children[childPos].tryGet(key, db, marshalizer) + return bn.children[childPos].tryGet(key, db) } -func (bn *branchNode) getNext(key []byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer) (node, []byte, error) { +func (bn *branchNode) getNext(key []byte, db data.DBWriteCacher) (node, []byte, error) { err := bn.isEmptyOrNil() if err != nil { return nil, nil, err @@ -354,7 +420,7 @@ func (bn *branchNode) getNext(key []byte, db data.DBWriteCacher, marshalizer mar return nil, nil, ErrChildPosOutOfRange } key = key[1:] - err = resolveIfCollapsed(bn, childPos, db, marshalizer) + err = resolveIfCollapsed(bn, childPos, db) if err != nil { return nil, nil, err } @@ -365,63 +431,93 @@ func (bn *branchNode) getNext(key 
[]byte, db data.DBWriteCacher, marshalizer mar return bn.children[childPos], key, nil } -func (bn *branchNode) insert(n *leafNode, db data.DBWriteCacher, marshalizer marshal.Marshalizer) (bool, node, error) { +func (bn *branchNode) insert(n *leafNode, db data.DBWriteCacher) (bool, node, [][]byte, error) { + emptyHashes := make([][]byte, 0) err := bn.isEmptyOrNil() if err != nil { - return false, nil, err + return false, nil, emptyHashes, err } if len(n.Key) == 0 { - return false, nil, ErrValueTooShort + return false, nil, emptyHashes, ErrValueTooShort } childPos := n.Key[firstByte] if childPosOutOfRange(childPos) { - return false, nil, ErrChildPosOutOfRange + return false, nil, emptyHashes, ErrChildPosOutOfRange } n.Key = n.Key[1:] - err = resolveIfCollapsed(bn, childPos, db, marshalizer) + err = resolveIfCollapsed(bn, childPos, db) if err != nil { - return false, nil, err + return false, nil, emptyHashes, err } if bn.children[childPos] != nil { - dirty, newNode, err := bn.children[childPos].insert(n, db, marshalizer) + var dirty bool + var newNode node + var oldHashes [][]byte + + dirty, newNode, oldHashes, err = bn.children[childPos].insert(n, db) if !dirty || err != nil { - return false, bn, err + return false, bn, emptyHashes, err + } + + if !bn.dirty { + oldHashes = append(oldHashes, bn.hash) } + bn.children[childPos] = newNode bn.dirty = dirty if dirty { bn.hash = nil } - return true, bn, nil + return true, bn, oldHashes, nil + } + + newLn, err := newLeafNode(n.Key, n.Value, bn.marsh, bn.hasher) + if err != nil { + return false, nil, emptyHashes, err + } + bn.children[childPos] = newLn + + oldHash := make([][]byte, 0) + if !bn.dirty { + oldHash = append(oldHash, bn.hash) } - bn.children[childPos] = newLeafNode(n.Key, n.Value) + bn.dirty = true bn.hash = nil - return true, bn, nil + return true, bn, oldHash, nil } -func (bn *branchNode) delete(key []byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer) (bool, node, error) { +func (bn *branchNode) delete(key []byte, db data.DBWriteCacher) (bool, node, [][]byte, error) { + emptyHashes := make([][]byte, 0) err := bn.isEmptyOrNil() if err != nil { - return false, nil, err + return false, nil, emptyHashes, err } if len(key) == 0 { - return false, nil, ErrValueTooShort + return false, nil, emptyHashes, ErrValueTooShort } childPos := key[firstByte] if childPosOutOfRange(childPos) { - return false, nil, ErrChildPosOutOfRange + return false, nil, emptyHashes, ErrChildPosOutOfRange } key = key[1:] - err = resolveIfCollapsed(bn, childPos, db, marshalizer) + err = resolveIfCollapsed(bn, childPos, db) if err != nil { - return false, nil, err + return false, nil, emptyHashes, err + } + + if bn.children[childPos] == nil { + return false, bn, emptyHashes, nil } - dirty, newNode, err := bn.children[childPos].delete(key, db, marshalizer) + dirty, newNode, oldHashes, err := bn.children[childPos].delete(key, db) if !dirty || err != nil { - return false, nil, err + return false, nil, emptyHashes, err + } + + if !bn.dirty { + oldHashes = append(oldHashes, bn.hash) } bn.hash = nil @@ -430,26 +526,34 @@ func (bn *branchNode) delete(key []byte, db data.DBWriteCacher, marshalizer mars bn.EncodedChildren[childPos] = nil } - nrOfChildren, pos := getChildPosition(bn) + numChildren, pos := getChildPosition(bn) - if nrOfChildren == 1 { - err = resolveIfCollapsed(bn, byte(pos), db, marshalizer) + if numChildren == 1 { + err = resolveIfCollapsed(bn, byte(pos), db) if err != nil { - return false, nil, err + return false, nil, emptyHashes, err } - newNode := 
bn.children[pos].reduceNode(pos) + newNode, err = bn.children[pos].reduceNode(pos) + if err != nil { + return false, nil, emptyHashes, err + } - return true, newNode, nil + return true, newNode, oldHashes, nil } bn.dirty = dirty - return true, bn, nil + return true, bn, oldHashes, nil } -func (bn *branchNode) reduceNode(pos int) node { - return newExtensionNode([]byte{byte(pos)}, bn) +func (bn *branchNode) reduceNode(pos int) (node, error) { + newEn, err := newExtensionNode([]byte{byte(pos)}, bn, bn.marsh, bn.hasher) + if err != nil { + return nil, err + } + + return newEn, nil } func getChildPosition(n *branchNode) (nrOfChildren int, childPos int) { @@ -506,7 +610,7 @@ func (bn *branchNode) deepClone() node { return nil } - clonedNode := &branchNode{} + clonedNode := &branchNode{baseNode: &baseNode{}} if bn.hash != nil { clonedNode.hash = make([]byte, len(bn.hash)) @@ -534,10 +638,106 @@ func (bn *branchNode) deepClone() node { } clonedNode.dirty = bn.dirty + clonedNode.marsh = bn.marsh + clonedNode.hasher = bn.hasher return clonedNode } +func (bn *branchNode) getDirtyHashes() ([][]byte, error) { + err := bn.isEmptyOrNil() + if err != nil { + return nil, err + } + + dirtyHashes := make([][]byte, 0) + + if !bn.isDirty() { + return dirtyHashes, nil + } + + for i := range bn.children { + if bn.children[i] == nil { + continue + } + + var hashes [][]byte + hashes, err = bn.children[i].getDirtyHashes() + if err != nil { + return nil, err + } + + dirtyHashes = append(dirtyHashes, hashes...) + } + + dirtyHashes = append(dirtyHashes, bn.getHash()) + return dirtyHashes, nil +} + +func (bn *branchNode) getChildren(db data.DBWriteCacher) ([]node, error) { + err := bn.isEmptyOrNil() + if err != nil { + return nil, err + } + + nextNodes := make([]node, 0) + + for i := range bn.children { + err = resolveIfCollapsed(bn, byte(i), db) + if err != nil { + return nil, err + } + + if bn.children[i] == nil { + continue + } + + nextNodes = append(nextNodes, bn.children[i]) + } + + return nextNodes, nil +} + +func (bn *branchNode) isValid() bool { + nrChildren := 0 + for i := range bn.EncodedChildren { + if len(bn.EncodedChildren[i]) != 0 || bn.children[i] != nil { + nrChildren++ + } + } + + return nrChildren >= 2 +} + +func (bn *branchNode) setDirty(dirty bool) { + bn.dirty = dirty +} + +func (bn *branchNode) loadChildren(syncer *trieSyncer) error { + err := bn.isEmptyOrNil() + if err != nil { + return err + } + + for i := range bn.EncodedChildren { + if len(bn.EncodedChildren[i]) == 0 { + continue + } + + var child node + child, err = syncer.getNode(bn.EncodedChildren[i]) + if err != nil { + return err + } + + bn.children[i] = child + } + + syncer.interceptedNodes.Remove(bn.hash) + + return nil +} + func (bn *branchNode) getAllLeaves(leaves map[string][]byte, key []byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer) error { err := bn.isEmptyOrNil() if err != nil { @@ -545,7 +745,7 @@ func (bn *branchNode) getAllLeaves(leaves map[string][]byte, key []byte, db data } for i := range bn.children { - err = resolveIfCollapsed(bn, byte(i), db, marshalizer) + err = resolveIfCollapsed(bn, byte(i), db) if err != nil { return err } diff --git a/data/trie/branchNode_test.go b/data/trie/branchNode_test.go index 411c1da0965..ac544137239 100644 --- a/data/trie/branchNode_test.go +++ b/data/trie/branchNode_test.go @@ -3,118 +3,189 @@ package trie import ( "encoding/hex" "fmt" + "io/ioutil" "reflect" "strconv" "testing" + "time" + "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/data" 
"github.com/ElrondNetwork/elrond-go/data/mock" + "github.com/ElrondNetwork/elrond-go/data/trie/proto" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/storage/lrucache" + "github.com/ElrondNetwork/elrond-go/storage/memorydb" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/stretchr/testify/assert" ) func getTestMarshAndHasher() (marshal.Marshalizer, hashing.Hasher) { - marsh := &mock.ProtobufMarshalizerMock{} + marsh := &mock.MarshalizerMock{} hasher := &mock.KeccakMock{} return marsh, hasher } -func getBnAndCollapsedBn() (*branchNode, *branchNode) { - marsh, hasher := getTestMarshAndHasher() - +func getBnAndCollapsedBn(marshalizer marshal.Marshalizer, hasher hashing.Hasher) (*branchNode, *branchNode) { var children [nrOfChildren]node EncodedChildren := make([][]byte, nrOfChildren) - children[2] = newLeafNode([]byte("dog"), []byte("dog")) - children[6] = newLeafNode([]byte("doe"), []byte("doe")) - children[13] = newLeafNode([]byte("doge"), []byte("doge")) - bn := newBranchNode() + children[2], _ = newLeafNode([]byte("dog"), []byte("dog"), marshalizer, hasher) + children[6], _ = newLeafNode([]byte("doe"), []byte("doe"), marshalizer, hasher) + children[13], _ = newLeafNode([]byte("doge"), []byte("doge"), marshalizer, hasher) + bn, _ := newBranchNode(marshalizer, hasher) bn.children = children - EncodedChildren[2], _ = encodeNodeAndGetHash(children[2], marsh, hasher) - EncodedChildren[6], _ = encodeNodeAndGetHash(children[6], marsh, hasher) - EncodedChildren[13], _ = encodeNodeAndGetHash(children[13], marsh, hasher) - collapsedBn := newBranchNode() + EncodedChildren[2], _ = encodeNodeAndGetHash(children[2]) + EncodedChildren[6], _ = encodeNodeAndGetHash(children[6]) + EncodedChildren[13], _ = encodeNodeAndGetHash(children[13]) + collapsedBn, _ := newBranchNode(marshalizer, hasher) collapsedBn.EncodedChildren = EncodedChildren return bn, collapsedBn } +func newEmptyTrie() (*patriciaMerkleTrie, *trieStorageManager, *mock.EvictionWaitingList) { + db := memorydb.New() + marsh, hsh := getTestMarshAndHasher() + evictionWaitListSize := uint(100) + evictionWaitList, _ := mock.NewEvictionWaitingList(evictionWaitListSize, mock.NewMemDbMock(), marsh) + + // TODO change this initialization of the persister (and everywhere in this package) + // by using a persister factory + tempDir, _ := ioutil.TempDir("", "leveldb_temp") + cfg := &config.DBConfig{ + FilePath: tempDir, + Type: string(storageUnit.LvlDbSerial), + BatchDelaySeconds: 1, + MaxBatchSize: 1, + MaxOpenFiles: 10, + } + + trieStorage, _ := NewTrieStorageManager(db, cfg, evictionWaitList) + tr := &patriciaMerkleTrie{ + trieStorage: trieStorage, + marshalizer: marsh, + hasher: hsh, + oldHashes: make([][]byte, 0), + oldRoot: make([]byte, 0), + } + + return tr, trieStorage, evictionWaitList +} + +func initTrie() *patriciaMerkleTrie { + tr, _, _ := newEmptyTrie() + _ = tr.Update([]byte("doe"), []byte("reindeer")) + _ = tr.Update([]byte("dog"), []byte("puppy")) + _ = tr.Update([]byte("dogglesworth"), []byte("cat")) + + return tr +} + +func getEncodedTrieNodesAndHashes(tr data.Trie) ([][]byte, [][]byte) { + it, _ := NewIterator(tr) + encNode, _ := it.MarshalizedNode() + + nodes := make([][]byte, 0) + nodes = append(nodes, encNode) + + hashes := make([][]byte, 0) + hash, _ := it.GetHash() + hashes = append(hashes, hash) + + for it.HasNext() { + _ = it.Next() + encNode, _ = it.MarshalizedNode() + + nodes = append(nodes, encNode) + hash, _ = it.GetHash() + 
hashes = append(hashes, hash) + } + + return nodes, hashes +} + func TestBranchNode_getHash(t *testing.T) { t.Parallel() - bn := &branchNode{hash: []byte("test hash")} + + bn := &branchNode{baseNode: &baseNode{hash: []byte("test hash")}} assert.Equal(t, bn.hash, bn.getHash()) } func TestBranchNode_isDirty(t *testing.T) { t.Parallel() - bn := &branchNode{dirty: true} + + bn := &branchNode{baseNode: &baseNode{dirty: true}} assert.Equal(t, true, bn.isDirty()) - bn = &branchNode{dirty: false} + bn = &branchNode{baseNode: &baseNode{dirty: false}} assert.Equal(t, false, bn.isDirty()) } func TestBranchNode_getCollapsed(t *testing.T) { t.Parallel() - bn, collapsedBn := getBnAndCollapsedBn() + + bn, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) collapsedBn.dirty = true - marsh, hasher := getTestMarshAndHasher() - collapsed, err := bn.getCollapsed(marsh, hasher) + collapsed, err := bn.getCollapsed() assert.Nil(t, err) assert.Equal(t, collapsedBn, collapsed) } func TestBranchNode_getCollapsedEmptyNode(t *testing.T) { t.Parallel() - marsh, hasher := getTestMarshAndHasher() - bn := newBranchNode() - collapsed, err := bn.getCollapsed(marsh, hasher) + bn := emptyDirtyBranchNode() + + collapsed, err := bn.getCollapsed() assert.Equal(t, ErrEmptyNode, err) assert.Nil(t, collapsed) } func TestBranchNode_getCollapsedNilNode(t *testing.T) { t.Parallel() - marsh, hasher := getTestMarshAndHasher() + var bn *branchNode - collapsed, err := bn.getCollapsed(marsh, hasher) + collapsed, err := bn.getCollapsed() assert.Equal(t, ErrNilNode, err) assert.Nil(t, collapsed) } func TestBranchNode_getCollapsedCollapsedNode(t *testing.T) { t.Parallel() - _, collapsedBn := getBnAndCollapsedBn() - marsh, hasher := getTestMarshAndHasher() - collapsed, err := collapsedBn.getCollapsed(marsh, hasher) + _, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) + + collapsed, err := collapsedBn.getCollapsed() assert.Nil(t, err) assert.Equal(t, collapsedBn, collapsed) } func TestBranchNode_setHash(t *testing.T) { t.Parallel() - bn, collapsedBn := getBnAndCollapsedBn() - marsh, hasher := getTestMarshAndHasher() - hash, _ := encodeNodeAndGetHash(collapsedBn, marsh, hasher) + bn, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) + hash, _ := encodeNodeAndGetHash(collapsedBn) - err := bn.setHash(marsh, hasher) + err := bn.setHash() assert.Nil(t, err) assert.Equal(t, hash, bn.hash) } func TestBranchNode_setRootHash(t *testing.T) { - db, _ := mock.NewMemDbMock() + t.Parallel() + + cfg := &config.DBConfig{} + db := mock.NewMemDbMock() marsh, hsh := getTestMarshAndHasher() + trieStorage, _ := NewTrieStorageManager(db, cfg, &mock.EvictionWaitingList{}) - tr1, _ := NewTrie(db, marsh, hsh) - tr2, _ := NewTrie(db, marsh, hsh) + tr1, _ := NewTrie(trieStorage, marsh, hsh) + tr2, _ := NewTrie(trieStorage, marsh, hsh) maxIterations := 10000 for i := 0; i < maxIterations; i++ { @@ -123,73 +194,80 @@ func TestBranchNode_setRootHash(t *testing.T) { _ = tr2.Update(val, val) } - err := tr1.root.setRootHash(marsh, hsh) - _ = tr2.root.setHash(marsh, hsh) + err := tr1.root.setRootHash() + _ = tr2.root.setHash() assert.Nil(t, err) assert.Equal(t, tr1.root.getHash(), tr2.root.getHash()) } func TestBranchNode_setRootHashCollapsedNode(t *testing.T) { t.Parallel() - _, collapsedBn := getBnAndCollapsedBn() - marsh, hasher := getTestMarshAndHasher() - hash, _ := encodeNodeAndGetHash(collapsedBn, marsh, hasher) + _, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) + hash, _ := encodeNodeAndGetHash(collapsedBn) - err := 
collapsedBn.setRootHash(marsh, hasher) + err := collapsedBn.setRootHash() assert.Nil(t, err) assert.Equal(t, hash, collapsedBn.hash) } func TestBranchNode_setHashEmptyNode(t *testing.T) { t.Parallel() - marsh, hasher := getTestMarshAndHasher() - bn := newBranchNode() - err := bn.setHash(marsh, hasher) + bn := emptyDirtyBranchNode() + + err := bn.setHash() assert.Equal(t, ErrEmptyNode, err) assert.Nil(t, bn.hash) } func TestBranchNode_setHashNilNode(t *testing.T) { t.Parallel() - marsh, hasher := getTestMarshAndHasher() + var bn *branchNode - err := bn.setHash(marsh, hasher) + err := bn.setHash() assert.Equal(t, ErrNilNode, err) assert.Nil(t, bn) } func TestBranchNode_setHashCollapsedNode(t *testing.T) { t.Parallel() - _, collapsedBn := getBnAndCollapsedBn() - marsh, hasher := getTestMarshAndHasher() - hash, _ := encodeNodeAndGetHash(collapsedBn, marsh, hasher) + _, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) + hash, _ := encodeNodeAndGetHash(collapsedBn) - err := collapsedBn.setHash(marsh, hasher) + err := collapsedBn.setHash() assert.Nil(t, err) assert.Equal(t, hash, collapsedBn.hash) +} +func TestBranchNode_setGivenHash(t *testing.T) { + t.Parallel() + + bn := &branchNode{baseNode: &baseNode{}} + expectedHash := []byte("node hash") + + bn.setGivenHash(expectedHash) + assert.Equal(t, expectedHash, bn.hash) } func TestBranchNode_hashChildren(t *testing.T) { t.Parallel() - bn, _ := getBnAndCollapsedBn() - marsh, hasher := getTestMarshAndHasher() + + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) for i := range bn.children { if bn.children[i] != nil { assert.Nil(t, bn.children[i].getHash()) } } - err := bn.hashChildren(marsh, hasher) + err := bn.hashChildren() assert.Nil(t, err) for i := range bn.children { if bn.children[i] != nil { - childHash, _ := encodeNodeAndGetHash(bn.children[i], marsh, hasher) + childHash, _ := encodeNodeAndGetHash(bn.children[i]) assert.Equal(t, childHash, bn.children[i].getHash()) } } @@ -197,306 +275,303 @@ func TestBranchNode_hashChildren(t *testing.T) { func TestBranchNode_hashChildrenEmptyNode(t *testing.T) { t.Parallel() - bn := newBranchNode() - marsh, hasher := getTestMarshAndHasher() - err := bn.hashChildren(marsh, hasher) + bn := emptyDirtyBranchNode() + + err := bn.hashChildren() assert.Equal(t, ErrEmptyNode, err) } func TestBranchNode_hashChildrenNilNode(t *testing.T) { t.Parallel() + var bn *branchNode - marsh, hasher := getTestMarshAndHasher() - err := bn.hashChildren(marsh, hasher) + err := bn.hashChildren() assert.Equal(t, ErrNilNode, err) } func TestBranchNode_hashChildrenCollapsedNode(t *testing.T) { t.Parallel() - _, collapsedBn := getBnAndCollapsedBn() - marsh, hasher := getTestMarshAndHasher() - err := collapsedBn.hashChildren(marsh, hasher) + _, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) + + err := collapsedBn.hashChildren() assert.Nil(t, err) - _, collapsedBn2 := getBnAndCollapsedBn() + _, collapsedBn2 := getBnAndCollapsedBn(getTestMarshAndHasher()) assert.Equal(t, collapsedBn2, collapsedBn) } func TestBranchNode_hashNode(t *testing.T) { t.Parallel() - _, collapsedBn := getBnAndCollapsedBn() - marsh, hasher := getTestMarshAndHasher() - expectedHash, _ := encodeNodeAndGetHash(collapsedBn, marsh, hasher) - hash, err := collapsedBn.hashNode(marsh, hasher) + _, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) + expectedHash, _ := encodeNodeAndGetHash(collapsedBn) + + hash, err := collapsedBn.hashNode() assert.Nil(t, err) assert.Equal(t, expectedHash, hash) } func 
TestBranchNode_hashNodeEmptyNode(t *testing.T) { t.Parallel() - bn := newBranchNode() - marsh, hasher := getTestMarshAndHasher() - hash, err := bn.hashNode(marsh, hasher) + bn := emptyDirtyBranchNode() + + hash, err := bn.hashNode() assert.Equal(t, ErrEmptyNode, err) assert.Nil(t, hash) } func TestBranchNode_hashNodeNilNode(t *testing.T) { t.Parallel() + var bn *branchNode - marsh, hasher := getTestMarshAndHasher() - hash, err := bn.hashNode(marsh, hasher) + hash, err := bn.hashNode() assert.Equal(t, ErrNilNode, err) assert.Nil(t, hash) } func TestBranchNode_commit(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn, collapsedBn := getBnAndCollapsedBn() + + db := mock.NewMemDbMock() marsh, hasher := getTestMarshAndHasher() + bn, collapsedBn := getBnAndCollapsedBn(marsh, hasher) - hash, _ := encodeNodeAndGetHash(collapsedBn, marsh, hasher) - _ = bn.setHash(marsh, hasher) + hash, _ := encodeNodeAndGetHash(collapsedBn) + _ = bn.setHash() - err := bn.commit(0, db, marsh, hasher) + err := bn.commit(false, 0, db, db) assert.Nil(t, err) encNode, _ := db.Get(hash) - node, _ := decodeNode(encNode, marsh) - h1, _ := encodeNodeAndGetHash(collapsedBn, marsh, hasher) - h2, _ := encodeNodeAndGetHash(node, marsh, hasher) + node, _ := decodeNode(encNode, marsh, hasher) + h1, _ := encodeNodeAndGetHash(collapsedBn) + h2, _ := encodeNodeAndGetHash(node) assert.Equal(t, h1, h2) } func TestBranchNode_commitEmptyNode(t *testing.T) { t.Parallel() - bn := newBranchNode() - db, _ := mock.NewMemDbMock() - marsh, hasher := getTestMarshAndHasher() - err := bn.commit(0, db, marsh, hasher) + bn := emptyDirtyBranchNode() + + err := bn.commit(false, 0, nil, nil) assert.Equal(t, ErrEmptyNode, err) } func TestBranchNode_commitNilNode(t *testing.T) { t.Parallel() + var bn *branchNode - db, _ := mock.NewMemDbMock() - marsh, hasher := getTestMarshAndHasher() - err := bn.commit(0, db, marsh, hasher) + err := bn.commit(false, 0, nil, nil) assert.Equal(t, ErrNilNode, err) } func TestBranchNode_getEncodedNode(t *testing.T) { t.Parallel() - bn, _ := getBnAndCollapsedBn() - marsh, _ := getTestMarshAndHasher() - expectedEncodedNode, _ := marsh.Marshal(bn) + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + + expectedEncodedNode, _ := bn.marsh.Marshal(bn) expectedEncodedNode = append(expectedEncodedNode, branch) - encNode, err := bn.getEncodedNode(marsh) + encNode, err := bn.getEncodedNode() assert.Nil(t, err) assert.Equal(t, expectedEncodedNode, encNode) } func TestBranchNode_getEncodedNodeEmpty(t *testing.T) { t.Parallel() - bn := newBranchNode() - marsh, _ := getTestMarshAndHasher() - encNode, err := bn.getEncodedNode(marsh) + bn := emptyDirtyBranchNode() + + encNode, err := bn.getEncodedNode() assert.Equal(t, ErrEmptyNode, err) assert.Nil(t, encNode) } func TestBranchNode_getEncodedNodeNil(t *testing.T) { t.Parallel() + var bn *branchNode - marsh, _ := getTestMarshAndHasher() - encNode, err := bn.getEncodedNode(marsh) + encNode, err := bn.getEncodedNode() assert.Equal(t, ErrNilNode, err) assert.Nil(t, encNode) } func TestBranchNode_resolveCollapsed(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn, collapsedBn := getBnAndCollapsedBn() - marsh, hasher := getTestMarshAndHasher() - _ = bn.setHash(marsh, hasher) - _ = bn.commit(0, db, marsh, hasher) - resolved := newLeafNode([]byte("dog"), []byte("dog")) + db := mock.NewMemDbMock() + bn, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) + childPos := byte(2) + + _ = bn.setHash() + _ = bn.commit(false, 0, db, db) + resolved, _ := 
newLeafNode([]byte("dog"), []byte("dog"), bn.marsh, bn.hasher) resolved.dirty = false + resolved.hash = bn.EncodedChildren[childPos] - err := collapsedBn.resolveCollapsed(2, db, marsh) + err := collapsedBn.resolveCollapsed(childPos, db) assert.Nil(t, err) - assert.Equal(t, resolved, collapsedBn.children[2]) + assert.Equal(t, resolved, collapsedBn.children[childPos]) } func TestBranchNode_resolveCollapsedEmptyNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn := newBranchNode() - marsh, _ := getTestMarshAndHasher() - err := bn.resolveCollapsed(2, db, marsh) + bn := emptyDirtyBranchNode() + + err := bn.resolveCollapsed(2, nil) assert.Equal(t, ErrEmptyNode, err) } func TestBranchNode_resolveCollapsedENilNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + var bn *branchNode - marsh, _ := getTestMarshAndHasher() - err := bn.resolveCollapsed(2, db, marsh) + err := bn.resolveCollapsed(2, nil) assert.Equal(t, ErrNilNode, err) } func TestBranchNode_resolveCollapsedPosOutOfRange(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn, _ := getBnAndCollapsedBn() - marsh, _ := getTestMarshAndHasher() - err := bn.resolveCollapsed(17, db, marsh) + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + + err := bn.resolveCollapsed(17, nil) assert.Equal(t, ErrChildPosOutOfRange, err) } func TestBranchNode_isCollapsed(t *testing.T) { t.Parallel() - bn, collapsedBn := getBnAndCollapsedBn() + + bn, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) assert.True(t, collapsedBn.isCollapsed()) assert.False(t, bn.isCollapsed()) - collapsedBn.children[2] = newLeafNode([]byte("dog"), []byte("dog")) + collapsedBn.children[2], _ = newLeafNode([]byte("dog"), []byte("dog"), bn.marsh, bn.hasher) assert.False(t, collapsedBn.isCollapsed()) } func TestBranchNode_tryGet(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn, _ := getBnAndCollapsedBn() - marsh, _ := getTestMarshAndHasher() - key := []byte{2, 100, 111, 103} - val, err := bn.tryGet(key, db, marsh) + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + childPos := byte(2) + key := append([]byte{childPos}, []byte("dog")...) 
+ + val, err := bn.tryGet(key, nil) assert.Equal(t, []byte("dog"), val) assert.Nil(t, err) } func TestBranchNode_tryGetEmptyKey(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn, _ := getBnAndCollapsedBn() - marsh, _ := getTestMarshAndHasher() + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) var key []byte - val, err := bn.tryGet(key, db, marsh) + + val, err := bn.tryGet(key, nil) assert.Nil(t, err) assert.Nil(t, val) } func TestBranchNode_tryGetChildPosOutOfRange(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn, _ := getBnAndCollapsedBn() - marsh, _ := getTestMarshAndHasher() - key := []byte{100, 111, 103} - val, err := bn.tryGet(key, db, marsh) + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + key := []byte("dog") + + val, err := bn.tryGet(key, nil) assert.Equal(t, ErrChildPosOutOfRange, err) assert.Nil(t, val) } func TestBranchNode_tryGetNilChild(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn, _ := getBnAndCollapsedBn() - marsh, _ := getTestMarshAndHasher() - key := []byte{3} - val, err := bn.tryGet(key, db, marsh) + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + nilChildKey := []byte{3} + + val, err := bn.tryGet(nilChildKey, nil) assert.Nil(t, err) assert.Nil(t, val) } func TestBranchNode_tryGetCollapsedNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn, collapsedBn := getBnAndCollapsedBn() - marsh, hasher := getTestMarshAndHasher() - _ = bn.setHash(marsh, hasher) - _ = bn.commit(0, db, marsh, hasher) - key := []byte{2, 100, 111, 103} - val, err := collapsedBn.tryGet(key, db, marsh) + db := mock.NewMemDbMock() + bn, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) + + _ = bn.setHash() + _ = bn.commit(false, 0, db, db) + + childPos := byte(2) + key := append([]byte{childPos}, []byte("dog")...) + + val, err := collapsedBn.tryGet(key, db) assert.Equal(t, []byte("dog"), val) assert.Nil(t, err) } func TestBranchNode_tryGetEmptyNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn := newBranchNode() - marsh, _ := getTestMarshAndHasher() - key := []byte{2, 100, 111, 103} - val, err := bn.tryGet(key, db, marsh) + bn := emptyDirtyBranchNode() + childPos := byte(2) + key := append([]byte{childPos}, []byte("dog")...) + + val, err := bn.tryGet(key, nil) assert.Equal(t, ErrEmptyNode, err) assert.Nil(t, val) } func TestBranchNode_tryGetNilNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + var bn *branchNode - marsh, _ := getTestMarshAndHasher() + childPos := byte(2) + key := append([]byte{childPos}, []byte("dog")...) - key := []byte{2, 100, 111, 103} - val, err := bn.tryGet(key, db, marsh) + val, err := bn.tryGet(key, nil) assert.Equal(t, ErrNilNode, err) assert.Nil(t, val) } func TestBranchNode_getNext(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn, _ := getBnAndCollapsedBn() - marsh, hasher := getTestMarshAndHasher() - nextNode := newLeafNode([]byte("dog"), []byte("dog")) - key := []byte{2, 100, 111, 103} - node, key, err := bn.getNext(key, db, marsh) + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + nextNode, _ := newLeafNode([]byte("dog"), []byte("dog"), bn.marsh, bn.hasher) + childPos := byte(2) + key := append([]byte{childPos}, []byte("dog")...) 
- h1, _ := encodeNodeAndGetHash(nextNode, marsh, hasher) - h2, _ := encodeNodeAndGetHash(node, marsh, hasher) + node, key, err := bn.getNext(key, nil) + h1, _ := encodeNodeAndGetHash(nextNode) + h2, _ := encodeNodeAndGetHash(node) assert.Equal(t, h1, h2) - assert.Equal(t, []byte{100, 111, 103}, key) + assert.Equal(t, []byte("dog"), key) assert.Nil(t, err) } func TestBranchNode_getNextWrongKey(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn, _ := getBnAndCollapsedBn() - marsh, _ := getTestMarshAndHasher() - key := []byte{100, 111, 103} - node, key, err := bn.getNext(key, db, marsh) + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + key := []byte("dog") + + node, key, err := bn.getNext(key, nil) assert.Nil(t, node) assert.Nil(t, key) assert.Equal(t, ErrChildPosOutOfRange, err) @@ -504,12 +579,12 @@ func TestBranchNode_getNextWrongKey(t *testing.T) { func TestBranchNode_getNextNilChild(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn, _ := getBnAndCollapsedBn() - marsh, _ := getTestMarshAndHasher() - key := []byte{4, 100, 111, 103} - node, key, err := bn.getNext(key, db, marsh) + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + nilChildPos := byte(4) + key := append([]byte{nilChildPos}, []byte("dog")...) + + node, key, err := bn.getNext(key, nil) assert.Nil(t, node) assert.Nil(t, key) assert.Equal(t, ErrNodeNotFound, err) @@ -517,13 +592,15 @@ func TestBranchNode_getNextNilChild(t *testing.T) { func TestBranchNode_insert(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn, _ := getBnAndCollapsedBn() - node := newLeafNode([]byte{0, 2, 3}, []byte("dogs")) - marsh, _ := getTestMarshAndHasher() - dirty, newBn, err := bn.insert(node, db, marsh) - bn.children[0] = newLeafNode([]byte{2, 3}, []byte("dogs")) + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + nodeKey := []byte{0, 2, 3} + node, _ := newLeafNode(nodeKey, []byte("dogs"), bn.marsh, bn.hasher) + + dirty, newBn, _, err := bn.insert(node, nil) + nodeKeyRemainder := nodeKey[1:] + + bn.children[0], _ = newLeafNode(nodeKeyRemainder, []byte("dogs"), bn.marsh, bn.hasher) assert.True(t, dirty) assert.Nil(t, err) assert.Equal(t, bn, newBn) @@ -531,12 +608,11 @@ func TestBranchNode_insert(t *testing.T) { func TestBranchNode_insertEmptyKey(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn, _ := getBnAndCollapsedBn() - node := newLeafNode([]byte{}, []byte("dogs")) - marsh, _ := getTestMarshAndHasher() - dirty, newBn, err := bn.insert(node, db, marsh) + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + node, _ := newLeafNode([]byte{}, []byte("dogs"), bn.marsh, bn.hasher) + + dirty, newBn, _, err := bn.insert(node, nil) assert.False(t, dirty) assert.Equal(t, ErrValueTooShort, err) assert.Nil(t, newBn) @@ -544,12 +620,11 @@ func TestBranchNode_insertEmptyKey(t *testing.T) { func TestBranchNode_insertChildPosOutOfRange(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn, _ := getBnAndCollapsedBn() - node := newLeafNode([]byte{100, 111, 103}, []byte("dogs")) - marsh, _ := getTestMarshAndHasher() - dirty, newBn, err := bn.insert(node, db, marsh) + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + node, _ := newLeafNode([]byte("dog"), []byte("dogs"), bn.marsh, bn.hasher) + + dirty, newBn, _, err := bn.insert(node, nil) assert.False(t, dirty) assert.Equal(t, ErrChildPosOutOfRange, err) assert.Nil(t, newBn) @@ -557,28 +632,98 @@ func TestBranchNode_insertChildPosOutOfRange(t *testing.T) { func TestBranchNode_insertCollapsedNode(t *testing.T) { t.Parallel() 
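// Reviewer sketch, not part of the patch: the *CollapsedNode tests commit the
// full node first because a collapsed branch keeps only EncodedChildren (the
// child hashes), so resolveIfCollapsed(bn, childPos, db) must load the child
// back from the DB before tryGet/insert/delete can descend into it.
// descendInto is a hypothetical helper illustrating that resolve-then-descend
// order, using the signatures introduced by this patch.
func descendInto(bn *branchNode, childPos byte, db data.DBWriteCacher) (node, error) {
	if err := resolveIfCollapsed(bn, childPos, db); err != nil {
		return nil, err
	}
	return bn.children[childPos], nil
}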
- db, _ := mock.NewMemDbMock() - bn, collapsedBn := getBnAndCollapsedBn() - node := newLeafNode([]byte{2, 100, 111, 103}, []byte("dogs")) - marsh, hasher := getTestMarshAndHasher() - _ = bn.setHash(marsh, hasher) - _ = bn.commit(0, db, marsh, hasher) - dirty, newBn, err := collapsedBn.insert(node, db, marsh) + db := mock.NewMemDbMock() + bn, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) + childPos := byte(2) + key := append([]byte{childPos}, []byte("dog")...) + node, _ := newLeafNode(key, []byte("dogs"), bn.marsh, bn.hasher) + + _ = bn.setHash() + _ = bn.commit(false, 0, db, db) + + dirty, newBn, _, err := collapsedBn.insert(node, db) assert.True(t, dirty) assert.Nil(t, err) - val, _ := newBn.tryGet([]byte{2, 100, 111, 103}, db, marsh) + + val, _ := newBn.tryGet(key, db) assert.Equal(t, []byte("dogs"), val) } +func TestBranchNode_insertInStoredBnOnExistingPos(t *testing.T) { + t.Parallel() + + db := mock.NewMemDbMock() + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + childPos := byte(2) + key := append([]byte{childPos}, []byte("dog")...) + node, _ := newLeafNode(key, []byte("dogs"), bn.marsh, bn.hasher) + + _ = bn.commit(false, 0, db, db) + bnHash := bn.getHash() + ln, _, _ := bn.getNext(key, db) + lnHash := ln.getHash() + expectedHashes := [][]byte{lnHash, bnHash} + + dirty, _, oldHashes, err := bn.insert(node, db) + assert.True(t, dirty) + assert.Nil(t, err) + assert.Equal(t, expectedHashes, oldHashes) +} + +func TestBranchNode_insertInStoredBnOnNilPos(t *testing.T) { + t.Parallel() + + db := mock.NewMemDbMock() + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + nilChildPos := byte(11) + key := append([]byte{nilChildPos}, []byte("dog")...) + node, _ := newLeafNode(key, []byte("dogs"), bn.marsh, bn.hasher) + + _ = bn.commit(false, 0, db, db) + bnHash := bn.getHash() + expectedHashes := [][]byte{bnHash} + + dirty, _, oldHashes, err := bn.insert(node, db) + assert.True(t, dirty) + assert.Nil(t, err) + assert.Equal(t, expectedHashes, oldHashes) +} + +func TestBranchNode_insertInDirtyBnOnNilPos(t *testing.T) { + t.Parallel() + + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + nilChildPos := byte(11) + key := append([]byte{nilChildPos}, []byte("dog")...) + node, _ := newLeafNode(key, []byte("dogs"), bn.marsh, bn.hasher) + + dirty, _, oldHashes, err := bn.insert(node, nil) + assert.True(t, dirty) + assert.Nil(t, err) + assert.Equal(t, [][]byte{}, oldHashes) +} + +func TestBranchNode_insertInDirtyBnOnExistingPos(t *testing.T) { + t.Parallel() + + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + childPos := byte(2) + key := append([]byte{childPos}, []byte("dog")...) 
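// Reviewer sketch, not part of the patch: the insertInStoredBn*/insertInDirtyBn*
// tests around this point pin down the new [][]byte return value. A node records
// its previous hash only when it was already persisted (dirty == false) and is
// now being replaced, so the trie can later hand those hashes to the eviction
// waiting list; a dirty, never committed node contributes nothing. collectOldHash
// is a hypothetical helper mirroring the
// "if !bn.dirty { oldHashes = append(oldHashes, bn.hash) }" pattern repeated
// throughout insert and delete in this patch.
func collectOldHash(oldHashes [][]byte, dirty bool, hash []byte) [][]byte {
	if !dirty {
		oldHashes = append(oldHashes, hash)
	}
	return oldHashes
}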
+ node, _ := newLeafNode(key, []byte("dogs"), bn.marsh, bn.hasher) + + dirty, _, oldHashes, err := bn.insert(node, nil) + assert.True(t, dirty) + assert.Nil(t, err) + assert.Equal(t, [][]byte{}, oldHashes) +} + func TestBranchNode_insertInNilNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + var bn *branchNode - node := newLeafNode([]byte{0, 2, 3}, []byte("dogs")) - marsh, _ := getTestMarshAndHasher() - dirty, newBn, err := bn.insert(node, db, marsh) + dirty, newBn, _, err := bn.insert(&leafNode{}, nil) assert.False(t, dirty) assert.Equal(t, ErrNilNode, err) assert.Nil(t, newBn) @@ -586,32 +731,67 @@ func TestBranchNode_insertInNilNode(t *testing.T) { func TestBranchNode_delete(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn, _ := getBnAndCollapsedBn() - marsh, hasher := getTestMarshAndHasher() + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) var children [nrOfChildren]node - children[6] = newLeafNode([]byte("doe"), []byte("doe")) - children[13] = newLeafNode([]byte("doge"), []byte("doge")) - expectedBn := newBranchNode() + children[6], _ = newLeafNode([]byte("doe"), []byte("doe"), bn.marsh, bn.hasher) + children[13], _ = newLeafNode([]byte("doge"), []byte("doge"), bn.marsh, bn.hasher) + expectedBn, _ := newBranchNode(bn.marsh, bn.hasher) expectedBn.children = children - dirty, newBn, err := bn.delete([]byte{2, 100, 111, 103}, db, marsh) + childPos := byte(2) + key := append([]byte{childPos}, []byte("dog")...) + + dirty, newBn, _, err := bn.delete(key, nil) assert.True(t, dirty) assert.Nil(t, err) - _ = expectedBn.setHash(marsh, hasher) - _ = newBn.setHash(marsh, hasher) + _ = expectedBn.setHash() + _ = newBn.setHash() assert.Equal(t, expectedBn.getHash(), newBn.getHash()) } +func TestBranchNode_deleteFromStoredBn(t *testing.T) { + t.Parallel() + + db := mock.NewMemDbMock() + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + childPos := byte(2) + lnKey := append([]byte{childPos}, []byte("dog")...) + + _ = bn.commit(false, 0, db, db) + bnHash := bn.getHash() + ln, _, _ := bn.getNext(lnKey, db) + lnHash := ln.getHash() + expectedHashes := [][]byte{lnHash, bnHash} + + dirty, _, oldHashes, err := bn.delete(lnKey, db) + assert.True(t, dirty) + assert.Nil(t, err) + assert.Equal(t, expectedHashes, oldHashes) +} + +func TestBranchNode_deleteFromDirtyBn(t *testing.T) { + t.Parallel() + + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + childPos := byte(2) + lnKey := append([]byte{childPos}, []byte("dog")...) + + dirty, _, oldHashes, err := bn.delete(lnKey, nil) + assert.True(t, dirty) + assert.Nil(t, err) + assert.Equal(t, [][]byte{}, oldHashes) +} + func TestBranchNode_deleteEmptyNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn := newBranchNode() - marsh, _ := getTestMarshAndHasher() - dirty, newBn, err := bn.delete([]byte{2, 100, 111, 103}, db, marsh) + bn := emptyDirtyBranchNode() + childPos := byte(2) + key := append([]byte{childPos}, []byte("dog")...) + + dirty, newBn, _, err := bn.delete(key, nil) assert.False(t, dirty) assert.Equal(t, ErrEmptyNode, err) assert.Nil(t, newBn) @@ -619,11 +799,12 @@ func TestBranchNode_deleteEmptyNode(t *testing.T) { func TestBranchNode_deleteNilNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + var bn *branchNode - marsh, _ := getTestMarshAndHasher() + childPos := byte(2) + key := append([]byte{childPos}, []byte("dog")...) 
- dirty, newBn, err := bn.delete([]byte{2, 100, 111, 103}, db, marsh) + dirty, newBn, _, err := bn.delete(key, nil) assert.False(t, dirty) assert.Equal(t, ErrNilNode, err) assert.Nil(t, newBn) @@ -631,11 +812,10 @@ func TestBranchNode_deleteNilNode(t *testing.T) { func TestBranchNode_deleteEmptykey(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn, _ := getBnAndCollapsedBn() - marsh, _ := getTestMarshAndHasher() - dirty, newBn, err := bn.delete([]byte{}, db, marsh) + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + + dirty, newBn, _, err := bn.delete([]byte{}, nil) assert.False(t, dirty) assert.Equal(t, ErrValueTooShort, err) assert.Nil(t, newBn) @@ -643,34 +823,40 @@ func TestBranchNode_deleteEmptykey(t *testing.T) { func TestBranchNode_deleteCollapsedNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - bn, collapsedBn := getBnAndCollapsedBn() - marsh, hasher := getTestMarshAndHasher() - _ = bn.setHash(marsh, hasher) - _ = bn.commit(0, db, marsh, hasher) - dirty, newBn, err := collapsedBn.delete([]byte{2, 100, 111, 103}, db, marsh) + db := mock.NewMemDbMock() + bn, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) + _ = bn.setHash() + _ = bn.commit(false, 0, db, db) + + childPos := byte(2) + key := append([]byte{childPos}, []byte("dog")...) + + dirty, newBn, _, err := collapsedBn.delete(key, db) assert.True(t, dirty) assert.Nil(t, err) - val, err := newBn.tryGet([]byte{2, 100, 111, 103}, db, marsh) + val, err := newBn.tryGet(key, db) assert.Nil(t, val) assert.Nil(t, err) } func TestBranchNode_deleteAndReduceBn(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - marsh, _ := getTestMarshAndHasher() + bn, _ := newBranchNode(getTestMarshAndHasher()) var children [nrOfChildren]node - children[2] = newLeafNode([]byte("dog"), []byte("dog")) - children[6] = newLeafNode([]byte("doe"), []byte("doe")) - bn := newBranchNode() + firstChildPos := byte(2) + secondChildPos := byte(6) + children[firstChildPos], _ = newLeafNode([]byte("dog"), []byte("dog"), bn.marsh, bn.hasher) + children[secondChildPos], _ = newLeafNode([]byte("doe"), []byte("doe"), bn.marsh, bn.hasher) bn.children = children - ln := newLeafNode([]byte{2, 100, 111, 103}, []byte("dog")) - dirty, newBn, err := bn.delete([]byte{6, 100, 111, 101}, db, marsh) + key := append([]byte{firstChildPos}, []byte("dog")...) + ln, _ := newLeafNode(key, []byte("dog"), bn.marsh, bn.hasher) + + key = append([]byte{secondChildPos}, []byte("doe")...) + dirty, newBn, _, err := bn.delete(key, nil) assert.True(t, dirty) assert.Nil(t, err) assert.Equal(t, ln, newBn) @@ -678,18 +864,25 @@ func TestBranchNode_deleteAndReduceBn(t *testing.T) { func TestBranchNode_reduceNode(t *testing.T) { t.Parallel() + + bn, _ := newBranchNode(getTestMarshAndHasher()) var children [nrOfChildren]node - children[2] = newLeafNode([]byte("dog"), []byte("dog")) - bn := newBranchNode() + childPos := byte(2) + children[childPos], _ = newLeafNode([]byte("dog"), []byte("dog"), bn.marsh, bn.hasher) bn.children = children - ln := newLeafNode([]byte{2, 100, 111, 103}, []byte("dog")) - node := bn.children[2].reduceNode(2) + + key := append([]byte{childPos}, []byte("dog")...) 
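// Reviewer sketch, not part of the patch: when a delete leaves a branch with a
// single remaining child (the numChildren == 1 case in branchNode.delete), that
// child is collapsed into its parent via reduceNode, which now also returns an
// error. For a leaf child the child position byte is prepended to its key, which
// is what the reduceNode assertions just below check; a branch child reduces to
// an extension node with the one-byte key []byte{byte(pos)}; an extension child
// likewise gets the position byte prepended. reducedChildKey is a hypothetical
// helper showing only the key rewrite.
func reducedChildKey(pos byte, childKey []byte) []byte {
	return append([]byte{pos}, childKey...)
}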
+ ln, _ := newLeafNode(key, []byte("dog"), bn.marsh, bn.hasher) + + node, err := bn.children[childPos].reduceNode(int(childPos)) assert.Equal(t, ln, node) + assert.Nil(t, err) } func TestBranchNode_getChildPosition(t *testing.T) { t.Parallel() - bn, _ := getBnAndCollapsedBn() + + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) nr, pos := getChildPosition(bn) assert.Equal(t, 3, nr) assert.Equal(t, 13, pos) @@ -697,7 +890,8 @@ func TestBranchNode_getChildPosition(t *testing.T) { func TestBranchNode_clone(t *testing.T) { t.Parallel() - bn, _ := getBnAndCollapsedBn() + + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) clone := bn.clone() assert.False(t, bn == clone) assert.Equal(t, bn, clone) @@ -705,7 +899,8 @@ func TestBranchNode_clone(t *testing.T) { func TestBranchNode_isEmptyOrNil(t *testing.T) { t.Parallel() - bn := newBranchNode() + + bn := emptyDirtyBranchNode() assert.Equal(t, ErrEmptyNode, bn.isEmptyOrNil()) bn = nil @@ -714,8 +909,9 @@ func TestBranchNode_isEmptyOrNil(t *testing.T) { func TestReduceBranchNodeWithExtensionNodeChildShouldWork(t *testing.T) { t.Parallel() - tr := newEmptyTrie() - expectedTr := newEmptyTrie() + + tr, _, _ := newEmptyTrie() + expectedTr, _, _ := newEmptyTrie() _ = expectedTr.Update([]byte("dog"), []byte("dog")) _ = expectedTr.Update([]byte("doll"), []byte("doll")) @@ -727,14 +923,14 @@ func TestReduceBranchNodeWithExtensionNodeChildShouldWork(t *testing.T) { expectedHash, _ := expectedTr.Root() hash, _ := tr.Root() - assert.Equal(t, expectedHash, hash) } func TestReduceBranchNodeWithBranchNodeChildShouldWork(t *testing.T) { t.Parallel() - tr := newEmptyTrie() - expectedTr := newEmptyTrie() + + tr, _, _ := newEmptyTrie() + expectedTr, _, _ := newEmptyTrie() _ = expectedTr.Update([]byte("dog"), []byte("puppy")) _ = expectedTr.Update([]byte("dogglesworth"), []byte("cat")) @@ -746,14 +942,14 @@ func TestReduceBranchNodeWithBranchNodeChildShouldWork(t *testing.T) { expectedHash, _ := expectedTr.Root() hash, _ := tr.Root() - assert.Equal(t, expectedHash, hash) } func TestReduceBranchNodeWithLeafNodeChildShouldWork(t *testing.T) { t.Parallel() - tr := newEmptyTrie() - expectedTr := newEmptyTrie() + + tr, _, _ := newEmptyTrie() + expectedTr, _, _ := newEmptyTrie() _ = expectedTr.Update([]byte("doe"), []byte("reindeer")) _ = expectedTr.Update([]byte("dogglesworth"), []byte("cat")) @@ -765,14 +961,14 @@ func TestReduceBranchNodeWithLeafNodeChildShouldWork(t *testing.T) { expectedHash, _ := expectedTr.Root() hash, _ := tr.Root() - assert.Equal(t, expectedHash, hash) } func TestReduceBranchNodeWithLeafNodeValueShouldWork(t *testing.T) { t.Parallel() - tr := newEmptyTrie() - expectedTr := newEmptyTrie() + + tr, _, _ := newEmptyTrie() + expectedTr, _, _ := newEmptyTrie() _ = expectedTr.Update([]byte("doe"), []byte("reindeer")) _ = expectedTr.Update([]byte("dog"), []byte("puppy")) @@ -788,11 +984,93 @@ func TestReduceBranchNodeWithLeafNodeValueShouldWork(t *testing.T) { assert.Equal(t, expectedHash, hash) } -func newEmptyTrie() data.Trie { - db, _ := mock.NewMemDbMock() - marsh, hsh := getTestMarshAndHasher() - tr, _ := NewTrie(db, marsh, hsh) - return tr +func TestBranchNode_getChildren(t *testing.T) { + t.Parallel() + + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + + children, err := bn.getChildren(nil) + assert.Nil(t, err) + assert.Equal(t, 3, len(children)) +} + +func TestBranchNode_getChildrenCollapsedBn(t *testing.T) { + t.Parallel() + + db := mock.NewMemDbMock() + bn, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) + _ = 
bn.commit(true, 0, db, db) + + children, err := collapsedBn.getChildren(db) + assert.Nil(t, err) + assert.Equal(t, 3, len(children)) +} + +func TestBranchNode_isValid(t *testing.T) { + t.Parallel() + + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + assert.True(t, bn.isValid()) + + bn.children[2] = nil + bn.children[6] = nil + assert.False(t, bn.isValid()) +} + +func TestBranchNode_setDirty(t *testing.T) { + t.Parallel() + + bn := &branchNode{baseNode: &baseNode{}} + bn.setDirty(true) + + assert.True(t, bn.dirty) +} + +func TestBranchNode_loadChildren(t *testing.T) { + t.Parallel() + + marsh, hasher := getTestMarshAndHasher() + tr := initTrie() + nodes, hashes := getEncodedTrieNodesAndHashes(tr) + nodesCacher, _ := lrucache.NewCache(100) + + resolver := &mock.TrieNodesResolverStub{ + RequestDataFromHashCalled: func(hash []byte) error { + for i := range nodes { + node, _ := NewInterceptedTrieNode(nodes[i], marsh, hasher) + nodesCacher.Put(node.hash, node) + } + return nil + }, + } + syncer, _ := NewTrieSyncer(resolver, nodesCacher, tr, time.Second) + syncer.interceptedNodes.RegisterHandler(func(key []byte) { + syncer.chRcvTrieNodes <- true + }) + + bnHashPosition := 1 + firstChildPos := 5 + firstChildHash := 2 + secondChildPos := 7 + secondChildHash := 3 + encodedChildren := make([][]byte, nrOfChildren) + encodedChildren[firstChildPos] = hashes[firstChildHash] + encodedChildren[secondChildPos] = hashes[secondChildHash] + bn := &branchNode{ + CollapsedBn: protobuf.CollapsedBn{ + EncodedChildren: encodedChildren, + }, + baseNode: &baseNode{ + hash: hashes[bnHashPosition], + }, + } + + err := bn.loadChildren(syncer) + assert.Nil(t, err) + assert.NotNil(t, bn.children[firstChildPos]) + assert.NotNil(t, bn.children[secondChildPos]) + + assert.Equal(t, 5, nodesCacher.Len()) } //------- deepClone @@ -800,16 +1078,16 @@ func newEmptyTrie() data.Trie { func TestBranchNode_deepCloneWithNilHashShouldWork(t *testing.T) { t.Parallel() - bn := &branchNode{} + bn := &branchNode{baseNode: &baseNode{}} bn.dirty = true bn.hash = nil bn.EncodedChildren = make([][]byte, len(bn.children)) bn.EncodedChildren[4] = getRandomByteSlice() bn.EncodedChildren[5] = getRandomByteSlice() bn.EncodedChildren[12] = getRandomByteSlice() - bn.children[4] = &leafNode{} - bn.children[5] = &leafNode{} - bn.children[12] = &leafNode{} + bn.children[4] = &leafNode{baseNode: &baseNode{}} + bn.children[5] = &leafNode{baseNode: &baseNode{}} + bn.children[12] = &leafNode{baseNode: &baseNode{}} cloned := bn.deepClone().(*branchNode) @@ -819,16 +1097,16 @@ func TestBranchNode_deepCloneWithNilHashShouldWork(t *testing.T) { func TestBranchNode_deepCloneShouldWork(t *testing.T) { t.Parallel() - bn := &branchNode{} + bn := &branchNode{baseNode: &baseNode{}} bn.dirty = true bn.hash = getRandomByteSlice() bn.EncodedChildren = make([][]byte, len(bn.children)) bn.EncodedChildren[4] = getRandomByteSlice() bn.EncodedChildren[5] = getRandomByteSlice() bn.EncodedChildren[12] = getRandomByteSlice() - bn.children[4] = &leafNode{} - bn.children[5] = &leafNode{} - bn.children[12] = &leafNode{} + bn.children[4] = &leafNode{baseNode: &baseNode{}} + bn.children[5] = &leafNode{baseNode: &baseNode{}} + bn.children[12] = &leafNode{baseNode: &baseNode{}} cloned := bn.deepClone().(*branchNode) @@ -877,10 +1155,10 @@ func getBranchNodeContents(bn *branchNode) string { } str := fmt.Sprintf(`extension node: - encoded child: %s - hash: %s - child: %s, - dirty: %v + encoded child: %s + hash: %s + child: %s, + dirty: %v `, encodedChildsString, 
hex.EncodeToString(bn.hash), @@ -891,8 +1169,8 @@ func getBranchNodeContents(bn *branchNode) string { } func BenchmarkDecodeBranchNode(b *testing.B) { - tr := newEmptyTrie() marsh, hsh := getTestMarshAndHasher() + tr, _, _ := newEmptyTrie() nrValuesInTrie := 100000 values := make([][]byte, nrValuesInTrie) @@ -906,12 +1184,12 @@ func BenchmarkDecodeBranchNode(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - _, _ = decodeNode(proof[0], marsh) + _, _ = decodeNode(proof[0], marsh, hsh) } } func BenchmarkMarshallNodeCapnp(b *testing.B) { - bn, _ := getBnAndCollapsedBn() + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) marsh := &marshal.CapnpMarshalizer{} b.ResetTimer() for i := 0; i < b.N; i++ { @@ -920,7 +1198,7 @@ func BenchmarkMarshallNodeCapnp(b *testing.B) { } func BenchmarkMarshallNodeJson(b *testing.B) { - bn, _ := getBnAndCollapsedBn() + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) marsh := marshal.JsonMarshalizer{} b.ResetTimer() for i := 0; i < b.N; i++ { diff --git a/data/trie/errors.go b/data/trie/errors.go index 13b441215c5..241f964ff2c 100644 --- a/data/trie/errors.go +++ b/data/trie/errors.go @@ -36,3 +36,33 @@ var ErrNilNode = errors.New("the node is nil") // ErrInvalidLength signals that length of the array is invalid var ErrInvalidLength = errors.New("invalid array length") + +// ErrWrongTypeAssertion signals that wrong type was provided +var ErrWrongTypeAssertion = errors.New("wrong type assertion") + +// ErrNilTrie is raised when the trie is nil +var ErrNilTrie = errors.New("the trie is nil") + +// ErrNilResolver is raised when the given resolver is nil +var ErrNilResolver = errors.New("the resolver is nil") + +// ErrInvalidHash is raised when the given hash is invalid +var ErrInvalidHash = errors.New("the received hash is invalid") + +// ErrTimeIsOut signals that time is out +var ErrTimeIsOut = errors.New("time is out") + +// ErrHashNotFound signals that the given hash was not found in db or snapshots +var ErrHashNotFound = errors.New("hash not found") + +// ErrNilTrieStorage is raised when a nil trie storage is provided +var ErrNilTrieStorage = errors.New("nil trie storage provided") + +// ErrNilEvictionWaitingList is raised when a nil eviction waiting list is provided +var ErrNilEvictionWaitingList = errors.New("nil eviction waiting list provided") + +// ErrNilSnapshotDbConfig is raised when a nil snapshot db config is provided +var ErrNilSnapshotDbConfig = errors.New("nil snapshot db config provided") + +// ErrNilPathManager signals that a nil path manager has been provided +var ErrNilPathManager = errors.New("nil path manager") diff --git a/data/trie/evictionWaitingList/evictionWaitingList.go b/data/trie/evictionWaitingList/evictionWaitingList.go new file mode 100644 index 00000000000..798473405b1 --- /dev/null +++ b/data/trie/evictionWaitingList/evictionWaitingList.go @@ -0,0 +1,96 @@ +package evictionWaitingList + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/storage" +) + +// evictionWaitingList is a structure that caches keys that need to be removed from a certain database. +// If the cache is full, the keys will be stored in the underlying database. Writing at the same key in +// cacher and db will overwrite the previous values. This structure is not concurrent safe. 
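// Reviewer sketch, not part of the patch: intended use of the eviction waiting
// list, based only on the exported API added in this file. exampleEvictionFlow,
// trieDb, rootHash and oldHashes are hypothetical, and the pruning step is an
// assumption about how callers are expected to combine Put and Evict.
func exampleEvictionFlow(ewl *evictionWaitingList, trieDb storage.Persister, rootHash []byte, oldHashes [][]byte) error {
	// park the hashes of the replaced nodes under the root that made them obsolete
	if err := ewl.Put(rootHash, oldHashes); err != nil {
		return err
	}

	// later, when that root can be pruned, take the hashes back out...
	hashes, err := ewl.Evict(rootHash)
	if err != nil {
		return err
	}

	// ...and remove the stale nodes from the trie database
	for _, h := range hashes {
		if err := trieDb.Remove(h); err != nil {
			return err
		}
	}

	return nil
}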
+type evictionWaitingList struct { + cache map[string][][]byte + cacheSize uint + db storage.Persister + marshalizer marshal.Marshalizer +} + +// NewEvictionWaitingList creates a new instance of evictionWaitingList +func NewEvictionWaitingList(size uint, db storage.Persister, marshalizer marshal.Marshalizer) (*evictionWaitingList, error) { + if size < 1 { + return nil, data.ErrInvalidCacheSize + } + if db == nil || db.IsInterfaceNil() { + return nil, data.ErrNilDatabase + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, data.ErrNilMarshalizer + } + + return &evictionWaitingList{ + cache: make(map[string][][]byte), + cacheSize: size, + db: db, + marshalizer: marshalizer, + }, nil +} + +// Put stores the given hashes in the eviction waiting list, in the position given by the root hash +func (ewl *evictionWaitingList) Put(rootHash []byte, hashes [][]byte) error { + if uint(len(ewl.cache)) < ewl.cacheSize { + ewl.cache[string(rootHash)] = hashes + return nil + } + + marshalizedHashes, err := ewl.marshalizer.Marshal(hashes) + if err != nil { + return err + } + + err = ewl.db.Put(rootHash, marshalizedHashes) + if err != nil { + return err + } + + return nil +} + +// Evict returns and removes from the waiting list all the hashes from the position given by the root hash +func (ewl *evictionWaitingList) Evict(rootHash []byte) ([][]byte, error) { + hashes, ok := ewl.cache[string(rootHash)] + if ok { + delete(ewl.cache, string(rootHash)) + return hashes, nil + } + + marshalizedHashes, err := ewl.db.Get(rootHash) + if err != nil { + return nil, err + } + + err = ewl.marshalizer.Unmarshal(&hashes, marshalizedHashes) + if err != nil { + return nil, err + } + + err = ewl.db.Remove(rootHash) + if err != nil { + return nil, err + } + + return hashes, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ewl *evictionWaitingList) IsInterfaceNil() bool { + if ewl == nil { + return true + } + return false +} + +// GetSize returns the size of the cache +func (ewl *evictionWaitingList) GetSize() uint { + return ewl.cacheSize +} diff --git a/data/trie/evictionWaitingList/evictionWaitingList_test.go b/data/trie/evictionWaitingList/evictionWaitingList_test.go new file mode 100644 index 00000000000..549eee6f082 --- /dev/null +++ b/data/trie/evictionWaitingList/evictionWaitingList_test.go @@ -0,0 +1,160 @@ +package evictionWaitingList + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/mock" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/memorydb" + "github.com/stretchr/testify/assert" +) + +func getDefaultParameters() (uint, storage.Persister, marshal.Marshalizer) { + return 10, memorydb.New(), &mock.MarshalizerMock{} +} + +func TestNewEvictionWaitingList(t *testing.T) { + t.Parallel() + + ec, err := NewEvictionWaitingList(getDefaultParameters()) + assert.Nil(t, err) + assert.NotNil(t, ec) +} + +func TestNewEvictionWaitingList_InvalidCacheSize(t *testing.T) { + t.Parallel() + + _, db, marsh := getDefaultParameters() + ec, err := NewEvictionWaitingList(0, db, marsh) + assert.Nil(t, ec) + assert.Equal(t, data.ErrInvalidCacheSize, err) +} + +func TestNewEvictionWaitingList_NilDatabase(t *testing.T) { + t.Parallel() + + size, _, marsh := getDefaultParameters() + ec, err := NewEvictionWaitingList(size, nil, marsh) + assert.Nil(t, ec) + assert.Equal(t, data.ErrNilDatabase, err) +} + +func 
TestNewEvictionWaitingList_NilDMarshalizer(t *testing.T) { + t.Parallel() + + size, db, _ := getDefaultParameters() + ec, err := NewEvictionWaitingList(size, db, nil) + assert.Nil(t, ec) + assert.Equal(t, data.ErrNilMarshalizer, err) +} + +func TestEvictionWaitingList_Put(t *testing.T) { + t.Parallel() + + ec, _ := NewEvictionWaitingList(getDefaultParameters()) + + hashes := [][]byte{ + []byte("hash1"), + []byte("hash2"), + } + root := []byte("root") + + err := ec.Put(root, hashes) + + assert.Nil(t, err) + assert.Equal(t, 1, len(ec.cache)) + assert.Equal(t, hashes, ec.cache[string(root)]) +} + +func TestEvictionWaitingList_PutMultiple(t *testing.T) { + t.Parallel() + + cacheSize := uint(2) + _, db, marsh := getDefaultParameters() + ec, _ := NewEvictionWaitingList(cacheSize, db, marsh) + + hashes := [][]byte{ + []byte("hash0"), + []byte("hash1"), + } + roots := [][]byte{ + []byte("root0"), + []byte("root1"), + []byte("root2"), + []byte("root3"), + } + + for i := range roots { + err := ec.Put(roots[i], hashes) + assert.Nil(t, err) + } + + assert.Equal(t, 2, len(ec.cache)) + for i := uint(0); i < cacheSize; i++ { + assert.Equal(t, hashes, ec.cache[string(roots[i])]) + } + for i := cacheSize; i < uint(len(roots)); i++ { + val := make([][]byte, 0) + encVal, err := ec.db.Get(roots[i]) + assert.Nil(t, err) + + err = ec.marshalizer.Unmarshal(&val, encVal) + + assert.Nil(t, err) + assert.Equal(t, hashes, val) + } + +} + +func TestEvictionWaitingList_Evict(t *testing.T) { + t.Parallel() + + ec, _ := NewEvictionWaitingList(getDefaultParameters()) + + expectedHashes := [][]byte{ + []byte("hash1"), + []byte("hash2"), + } + root1 := []byte("root1") + + _ = ec.Put(root1, expectedHashes) + + hashes, err := ec.Evict([]byte("root1")) + assert.Nil(t, err) + assert.Equal(t, 0, len(ec.cache)) + assert.Equal(t, expectedHashes, hashes) +} + +func TestEvictionWaitingList_EvictFromDB(t *testing.T) { + t.Parallel() + + cacheSize := uint(2) + _, db, marsh := getDefaultParameters() + ec, _ := NewEvictionWaitingList(cacheSize, db, marsh) + + hashes := [][]byte{ + []byte("hash0"), + []byte("hash1"), + } + roots := [][]byte{ + []byte("root0"), + []byte("root1"), + []byte("root2"), + } + + for i := range roots { + _ = ec.Put(roots[i], hashes) + } + + val, _ := ec.db.Get(roots[2]) + assert.NotNil(t, val) + + vals, err := ec.Evict(roots[2]) + assert.Nil(t, err) + assert.Equal(t, hashes, vals) + + val, _ = ec.db.Get(roots[2]) + assert.Nil(t, val) +} diff --git a/data/trie/extensionNode.go b/data/trie/extensionNode.go index 79bf620a6bc..46a1ef43728 100644 --- a/data/trie/extensionNode.go +++ b/data/trie/extensionNode.go @@ -53,27 +53,57 @@ func extensionNodeCapnToGo(src capnp.ExtensionNodeCapn, dest *extensionNode) *ex return dest } -func newExtensionNode(key []byte, child node) *extensionNode { +func newExtensionNode(key []byte, child node, marshalizer marshal.Marshalizer, hasher hashing.Hasher) (*extensionNode, error) { + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, ErrNilMarshalizer + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, ErrNilHasher + } + return &extensionNode{ CollapsedEn: protobuf.CollapsedEn{ Key: key, EncodedChild: nil, }, child: child, - hash: nil, - dirty: true, - } + baseNode: &baseNode{ + dirty: true, + marsh: marshalizer, + hasher: hasher, + }, + }, nil } func (en *extensionNode) getHash() []byte { return en.hash } +func (en *extensionNode) setGivenHash(hash []byte) { + en.hash = hash +} + func (en *extensionNode) isDirty() bool { return en.dirty } -func (en 
*extensionNode) getCollapsed(marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) { +func (en *extensionNode) getMarshalizer() marshal.Marshalizer { + return en.marsh +} + +func (en *extensionNode) setMarshalizer(marshalizer marshal.Marshalizer) { + en.marsh = marshalizer +} + +func (en *extensionNode) getHasher() hashing.Hasher { + return en.hasher +} + +func (en *extensionNode) setHasher(hasher hashing.Hasher) { + en.hasher = hasher +} + +func (en *extensionNode) getCollapsed() (node, error) { err := en.isEmptyOrNil() if err != nil { return nil, err @@ -87,7 +117,7 @@ func (en *extensionNode) getCollapsed(marshalizer marshal.Marshalizer, hasher ha return nil, err } if !ok { - err := en.child.setHash(marshalizer, hasher) + err = en.child.setHash() if err != nil { return nil, err } @@ -97,7 +127,7 @@ func (en *extensionNode) getCollapsed(marshalizer marshal.Marshalizer, hasher ha return collapsed, nil } -func (en *extensionNode) setHash(marshalizer marshal.Marshalizer, hasher hashing.Hasher) error { +func (en *extensionNode) setHash() error { err := en.isEmptyOrNil() if err != nil { return err @@ -106,14 +136,15 @@ func (en *extensionNode) setHash(marshalizer marshal.Marshalizer, hasher hashing return nil } if en.isCollapsed() { - hash, err := encodeNodeAndGetHash(en, marshalizer, hasher) + var hash []byte + hash, err = encodeNodeAndGetHash(en) if err != nil { return err } en.hash = hash return nil } - hash, err := hashChildrenAndNode(en, marshalizer, hasher) + hash, err := hashChildrenAndNode(en) if err != nil { return err } @@ -121,24 +152,24 @@ func (en *extensionNode) setHash(marshalizer marshal.Marshalizer, hasher hashing return nil } -func (en *extensionNode) setHashConcurrent(marshalizer marshal.Marshalizer, hasher hashing.Hasher, wg *sync.WaitGroup, c chan error) { - err := en.setHash(marshalizer, hasher) +func (en *extensionNode) setHashConcurrent(wg *sync.WaitGroup, c chan error) { + err := en.setHash() if err != nil { c <- err } wg.Done() } -func (en *extensionNode) setRootHash(marshalizer marshal.Marshalizer, hasher hashing.Hasher) error { - return en.setHash(marshalizer, hasher) +func (en *extensionNode) setRootHash() error { + return en.setHash() } -func (en *extensionNode) hashChildren(marshalizer marshal.Marshalizer, hasher hashing.Hasher) error { +func (en *extensionNode) hashChildren() error { err := en.isEmptyOrNil() if err != nil { return err } if en.child != nil { - err = en.child.setHash(marshalizer, hasher) + err = en.child.setHash() if err != nil { return err } @@ -146,44 +177,56 @@ func (en *extensionNode) hashChildren(marshalizer marshal.Marshalizer, hasher ha return nil } -func (en *extensionNode) hashNode(marshalizer marshal.Marshalizer, hasher hashing.Hasher) ([]byte, error) { +func (en *extensionNode) hashNode() ([]byte, error) { err := en.isEmptyOrNil() if err != nil { return nil, err } if en.child != nil { - encChild, err := encodeNodeAndGetHash(en.child, marshalizer, hasher) + var encChild []byte + encChild, err = encodeNodeAndGetHash(en.child) if err != nil { return nil, err } en.EncodedChild = encChild } - return encodeNodeAndGetHash(en, marshalizer, hasher) + return encodeNodeAndGetHash(en) } -func (en *extensionNode) commit(level byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer, hasher hashing.Hasher) error { +func (en *extensionNode) commit(force bool, level byte, originDb data.DBWriteCacher, targetDb data.DBWriteCacher) error { level++ err := en.isEmptyOrNil() if err != nil { return err } - if !en.dirty { + + 
shouldNotCommit := !en.dirty && !force + if shouldNotCommit { return nil } + + if force { + err = resolveIfCollapsed(en, 0, originDb) + if err != nil { + return err + } + } + if en.child != nil { - err = en.child.commit(level, db, marshalizer, hasher) + err = en.child.commit(force, level, originDb, targetDb) if err != nil { return err } } en.dirty = false - err = encodeNodeAndCommitToDB(en, db, marshalizer, hasher) + err = encodeNodeAndCommitToDB(en, targetDb) if err != nil { return err } if level == maxTrieLevelAfterCommit { - collapsed, err := en.getCollapsed(marshalizer, hasher) + var collapsed node + collapsed, err = en.getCollapsed() if err != nil { return err } @@ -194,12 +237,12 @@ func (en *extensionNode) commit(level byte, db data.DBWriteCacher, marshalizer m return nil } -func (en *extensionNode) getEncodedNode(marshalizer marshal.Marshalizer) ([]byte, error) { +func (en *extensionNode) getEncodedNode() ([]byte, error) { err := en.isEmptyOrNil() if err != nil { return nil, err } - marshaledNode, err := marshalizer.Marshal(en) + marshaledNode, err := en.marsh.Marshal(en) if err != nil { return nil, err } @@ -207,15 +250,16 @@ func (en *extensionNode) getEncodedNode(marshalizer marshal.Marshalizer) ([]byte return marshaledNode, nil } -func (en *extensionNode) resolveCollapsed(pos byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer) error { +func (en *extensionNode) resolveCollapsed(pos byte, db data.DBWriteCacher) error { err := en.isEmptyOrNil() if err != nil { return err } - child, err := getNodeFromDBAndDecode(en.EncodedChild, db, marshalizer) + child, err := getNodeFromDBAndDecode(en.EncodedChild, db, en.marsh, en.hasher) if err != nil { return err } + child.setGivenHash(en.EncodedChild) en.child = child return nil } @@ -228,7 +272,7 @@ func (en *extensionNode) isPosCollapsed(pos int) bool { return en.isCollapsed() } -func (en *extensionNode) tryGet(key []byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer) (value []byte, err error) { +func (en *extensionNode) tryGet(key []byte, db data.DBWriteCacher) (value []byte, err error) { err = en.isEmptyOrNil() if err != nil { return nil, err @@ -242,15 +286,15 @@ func (en *extensionNode) tryGet(key []byte, db data.DBWriteCacher, marshalizer m return nil, nil } key = key[len(en.Key):] - err = resolveIfCollapsed(en, 0, db, marshalizer) + err = resolveIfCollapsed(en, 0, db) if err != nil { return nil, err } - return en.child.tryGet(key, db, marshalizer) + return en.child.tryGet(key, db) } -func (en *extensionNode) getNext(key []byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer) (node, []byte, error) { +func (en *extensionNode) getNext(key []byte, db data.DBWriteCacher) (node, []byte, error) { err := en.isEmptyOrNil() if err != nil { return nil, nil, err @@ -263,7 +307,7 @@ func (en *extensionNode) getNext(key []byte, db data.DBWriteCacher, marshalizer if keysDontMatch { return nil, nil, ErrNodeNotFound } - err = resolveIfCollapsed(en, 0, db, marshalizer) + err = resolveIfCollapsed(en, 0, db) if err != nil { return nil, nil, err } @@ -272,85 +316,147 @@ func (en *extensionNode) getNext(key []byte, db data.DBWriteCacher, marshalizer return en.child, key, nil } -func (en *extensionNode) insert(n *leafNode, db data.DBWriteCacher, marshalizer marshal.Marshalizer) (bool, node, error) { +func (en *extensionNode) insert(n *leafNode, db data.DBWriteCacher) (bool, node, [][]byte, error) { + emptyHashes := make([][]byte, 0) err := en.isEmptyOrNil() if err != nil { - return false, nil, err + return false, nil, 
emptyHashes, err } - err = resolveIfCollapsed(en, 0, db, marshalizer) + err = resolveIfCollapsed(en, 0, db) if err != nil { - return false, nil, err + return false, nil, emptyHashes, err } keyMatchLen := prefixLen(n.Key, en.Key) // If the whole key matches, keep this extension node as is // and only update the value. if keyMatchLen == len(en.Key) { + var dirty bool + var newNode, newEn node + var oldHashes [][]byte + n.Key = n.Key[keyMatchLen:] - dirty, newNode, err := en.child.insert(n, db, marshalizer) + dirty, newNode, oldHashes, err = en.child.insert(n, db) if !dirty || err != nil { - return false, nil, err + return false, nil, emptyHashes, err + } + + if !en.dirty { + oldHashes = append(oldHashes, en.hash) + } + + newEn, err = newExtensionNode(en.Key, newNode, en.marsh, en.hasher) + if err != nil { + return false, nil, emptyHashes, err } - return true, newExtensionNode(en.Key, newNode), nil + + return true, newEn, oldHashes, nil + } + + oldHash := make([][]byte, 0) + if !en.dirty { + oldHash = append(oldHash, en.hash) } + // Otherwise branch out at the index where they differ. - branch := newBranchNode() + bn, err := newBranchNode(en.marsh, en.hasher) + if err != nil { + return false, nil, emptyHashes, err + } + oldChildPos := en.Key[keyMatchLen] newChildPos := n.Key[keyMatchLen] if childPosOutOfRange(oldChildPos) || childPosOutOfRange(newChildPos) { - return false, nil, ErrChildPosOutOfRange + return false, nil, emptyHashes, ErrChildPosOutOfRange + } + + followingExtensionNode, err := newExtensionNode(en.Key[keyMatchLen+1:], en.child, en.marsh, en.hasher) + if err != nil { + return false, nil, emptyHashes, err } - followingExtensionNode := newExtensionNode(en.Key[keyMatchLen+1:], en.child) if len(followingExtensionNode.Key) < 1 { - branch.children[oldChildPos] = en.child + bn.children[oldChildPos] = en.child } else { - branch.children[oldChildPos] = followingExtensionNode + bn.children[oldChildPos] = followingExtensionNode } n.Key = n.Key[keyMatchLen+1:] - branch.children[newChildPos] = n + bn.children[newChildPos] = n if keyMatchLen == 0 { - return true, branch, nil + return true, bn, oldHash, nil + } + + newEn, err := newExtensionNode(en.Key[:keyMatchLen], bn, en.marsh, en.hasher) + if err != nil { + return false, nil, emptyHashes, err } - return true, newExtensionNode(en.Key[:keyMatchLen], branch), nil + + return true, newEn, oldHash, nil } -func (en *extensionNode) delete(key []byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer) (bool, node, error) { +func (en *extensionNode) delete(key []byte, db data.DBWriteCacher) (bool, node, [][]byte, error) { + emptyHashes := make([][]byte, 0) err := en.isEmptyOrNil() if err != nil { - return false, nil, err + return false, nil, emptyHashes, err } if len(key) == 0 { - return false, nil, ErrValueTooShort + return false, nil, emptyHashes, ErrValueTooShort } keyMatchLen := prefixLen(key, en.Key) if keyMatchLen < len(en.Key) { - return false, en, nil + return false, en, emptyHashes, nil } - err = resolveIfCollapsed(en, 0, db, marshalizer) + err = resolveIfCollapsed(en, 0, db) if err != nil { - return false, nil, err + return false, nil, emptyHashes, err } - dirty, newNode, err := en.child.delete(key[len(en.Key):], db, marshalizer) + dirty, newNode, oldHashes, err := en.child.delete(key[len(en.Key):], db) if !dirty || err != nil { - return false, en, err + return false, en, emptyHashes, err + } + + if !en.dirty { + oldHashes = append(oldHashes, en.hash) } + var n node switch newNode := newNode.(type) { case *leafNode: - return true, 
newLeafNode(concat(en.Key, newNode.Key...), newNode.Value), nil + n, err = newLeafNode(concat(en.Key, newNode.Key...), newNode.Value, en.marsh, en.hasher) + if err != nil { + return false, nil, emptyHashes, err + } + + return true, n, oldHashes, nil case *extensionNode: - return true, newExtensionNode(concat(en.Key, newNode.Key...), newNode.child), nil + n, err = newExtensionNode(concat(en.Key, newNode.Key...), newNode.child, en.marsh, en.hasher) + if err != nil { + return false, nil, emptyHashes, err + } + + return true, n, oldHashes, nil default: - return true, newExtensionNode(en.Key, newNode), nil + n, err = newExtensionNode(en.Key, newNode, en.marsh, en.hasher) + if err != nil { + return false, nil, emptyHashes, err + } + + return true, n, oldHashes, nil } } -func (en *extensionNode) reduceNode(pos int) node { +func (en *extensionNode) reduceNode(pos int) (node, error) { k := append([]byte{byte(pos)}, en.Key...) - return newExtensionNode(k, en.child) + + newEn, err := newExtensionNode(k, en.child, en.marsh, en.hasher) + if err != nil { + return nil, err + } + + return newEn, nil } func (en *extensionNode) clone() *extensionNode { @@ -392,7 +498,7 @@ func (en *extensionNode) deepClone() node { return nil } - clonedNode := &extensionNode{} + clonedNode := &extensionNode{baseNode: &baseNode{}} if en.Key != nil { clonedNode.Key = make([]byte, len(en.Key)) @@ -415,16 +521,96 @@ func (en *extensionNode) deepClone() node { clonedNode.child = en.child.deepClone() } + clonedNode.marsh = en.marsh + clonedNode.hasher = en.hasher + return clonedNode } +func (en *extensionNode) getDirtyHashes() ([][]byte, error) { + err := en.isEmptyOrNil() + if err != nil { + return nil, err + } + + dirtyHashes := make([][]byte, 0) + + if !en.isDirty() { + return dirtyHashes, nil + } + + hashes, err := en.child.getDirtyHashes() + if err != nil { + return nil, err + } + + dirtyHashes = append(dirtyHashes, hashes...) 
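+	// the extension node's own hash goes after its child's dirty hashes, so the
+	// returned slice is ordered bottom-up (descendants first, this node last)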
+ dirtyHashes = append(dirtyHashes, en.hash) + return dirtyHashes, nil +} + +func (en *extensionNode) getChildren(db data.DBWriteCacher) ([]node, error) { + err := en.isEmptyOrNil() + if err != nil { + return nil, err + } + + nextNodes := make([]node, 0) + + err = resolveIfCollapsed(en, 0, db) + if err != nil { + return nil, err + } + + nextNodes = append(nextNodes, en.child) + + return nextNodes, nil +} + +func (en *extensionNode) isValid() bool { + if len(en.EncodedChild) == 0 && en.child == nil { + return false + } + + if len(en.Key) == 0 { + return false + } + + return true +} + +func (en *extensionNode) setDirty(dirty bool) { + en.dirty = dirty +} + +func (en *extensionNode) loadChildren(syncer *trieSyncer) error { + err := en.isEmptyOrNil() + if err != nil { + return err + } + + if en.EncodedChild == nil { + return ErrNilNode + } + + child, err := syncer.getNode(en.EncodedChild) + if err != nil { + return err + } + en.child = child + + syncer.interceptedNodes.Remove(en.hash) + + return nil +} + func (en *extensionNode) getAllLeaves(leaves map[string][]byte, key []byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer) error { err := en.isEmptyOrNil() if err != nil { return err } - err = resolveIfCollapsed(en, 0, db, marshalizer) + err = resolveIfCollapsed(en, 0, db) if err != nil { return err } diff --git a/data/trie/extensionNode_test.go b/data/trie/extensionNode_test.go index 56554ee450e..bc6a0c3f28a 100644 --- a/data/trie/extensionNode_test.go +++ b/data/trie/extensionNode_test.go @@ -5,176 +5,192 @@ import ( "fmt" "reflect" "testing" + "time" "github.com/ElrondNetwork/elrond-go/data/mock" protobuf "github.com/ElrondNetwork/elrond-go/data/trie/proto" + "github.com/ElrondNetwork/elrond-go/storage/lrucache" "github.com/stretchr/testify/assert" ) func getEnAndCollapsedEn() (*extensionNode, *extensionNode) { - marsh, hasher := getTestMarshAndHasher() - child, collapsedChild := getBnAndCollapsedBn() - en := newExtensionNode([]byte("d"), child) + child, collapsedChild := getBnAndCollapsedBn(getTestMarshAndHasher()) + en, _ := newExtensionNode([]byte("d"), child, child.marsh, child.hasher) - childHash, _ := encodeNodeAndGetHash(collapsedChild, marsh, hasher) - collapsedEn := &extensionNode{CollapsedEn: protobuf.CollapsedEn{Key: []byte("d"), EncodedChild: childHash}} + childHash, _ := encodeNodeAndGetHash(collapsedChild) + collapsedEn := &extensionNode{CollapsedEn: protobuf.CollapsedEn{Key: []byte("d"), EncodedChild: childHash}, baseNode: &baseNode{}} + collapsedEn.marsh = child.marsh + collapsedEn.hasher = child.hasher return en, collapsedEn } func TestExtensionNode_newExtensionNode(t *testing.T) { t.Parallel() - bn, _ := getBnAndCollapsedBn() + + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) expectedEn := &extensionNode{ CollapsedEn: protobuf.CollapsedEn{ Key: []byte("dog"), EncodedChild: nil, }, child: bn, - hash: nil, - dirty: true, + baseNode: &baseNode{ + dirty: true, + marsh: bn.marsh, + hasher: bn.hasher, + }, } - en := newExtensionNode([]byte("dog"), bn) + en, _ := newExtensionNode([]byte("dog"), bn, bn.marsh, bn.hasher) assert.Equal(t, expectedEn, en) } func TestExtensionNode_getHash(t *testing.T) { t.Parallel() - en := &extensionNode{hash: []byte("test hash")} + + en := &extensionNode{baseNode: &baseNode{hash: []byte("test hash")}} assert.Equal(t, en.hash, en.getHash()) } func TestExtensionNode_isDirty(t *testing.T) { t.Parallel() - en := &extensionNode{dirty: true} + + en := &extensionNode{baseNode: &baseNode{dirty: true}} assert.Equal(t, true, en.isDirty()) - en 
= &extensionNode{dirty: false} + en = &extensionNode{baseNode: &baseNode{dirty: false}} assert.Equal(t, false, en.isDirty()) } func TestExtensionNode_getCollapsed(t *testing.T) { t.Parallel() + en, collapsedEn := getEnAndCollapsedEn() collapsedEn.dirty = true - marsh, hasher := getTestMarshAndHasher() - collapsed, err := en.getCollapsed(marsh, hasher) + collapsed, err := en.getCollapsed() assert.Nil(t, err) assert.Equal(t, collapsedEn, collapsed) } func TestExtensionNode_getCollapsedEmptyNode(t *testing.T) { t.Parallel() - marsh, hasher := getTestMarshAndHasher() + en := &extensionNode{} - collapsed, err := en.getCollapsed(marsh, hasher) + collapsed, err := en.getCollapsed() assert.Equal(t, ErrEmptyNode, err) assert.Nil(t, collapsed) } func TestExtensionNode_getCollapsedNilNode(t *testing.T) { t.Parallel() - marsh, hasher := getTestMarshAndHasher() + var en *extensionNode - collapsed, err := en.getCollapsed(marsh, hasher) + collapsed, err := en.getCollapsed() assert.Equal(t, ErrNilNode, err) assert.Nil(t, collapsed) } func TestExtensionNode_getCollapsedCollapsedNode(t *testing.T) { t.Parallel() + _, collapsedEn := getEnAndCollapsedEn() - marsh, hasher := getTestMarshAndHasher() - collapsed, err := collapsedEn.getCollapsed(marsh, hasher) + collapsed, err := collapsedEn.getCollapsed() assert.Nil(t, err) assert.Equal(t, collapsedEn, collapsed) } func TestExtensionNode_setHash(t *testing.T) { t.Parallel() - en, collapsedEn := getEnAndCollapsedEn() - marsh, hasher := getTestMarshAndHasher() - hash, _ := encodeNodeAndGetHash(collapsedEn, marsh, hasher) + en, collapsedEn := getEnAndCollapsedEn() + hash, _ := encodeNodeAndGetHash(collapsedEn) - err := en.setHash(marsh, hasher) + err := en.setHash() assert.Nil(t, err) assert.Equal(t, hash, en.hash) } func TestExtensionNode_setHashEmptyNode(t *testing.T) { t.Parallel() - marsh, hasher := getTestMarshAndHasher() - en := &extensionNode{} - err := en.setHash(marsh, hasher) + en := &extensionNode{baseNode: &baseNode{}} + + err := en.setHash() assert.Equal(t, ErrEmptyNode, err) assert.Nil(t, en.hash) } func TestExtensionNode_setHashNilNode(t *testing.T) { t.Parallel() - marsh, hasher := getTestMarshAndHasher() + var en *extensionNode - err := en.setHash(marsh, hasher) + err := en.setHash() assert.Equal(t, ErrNilNode, err) assert.Nil(t, en) } func TestExtensionNode_setHashCollapsedNode(t *testing.T) { t.Parallel() - _, collapsedEn := getEnAndCollapsedEn() - marsh, hasher := getTestMarshAndHasher() - hash, _ := encodeNodeAndGetHash(collapsedEn, marsh, hasher) + _, collapsedEn := getEnAndCollapsedEn() + hash, _ := encodeNodeAndGetHash(collapsedEn) - err := collapsedEn.setHash(marsh, hasher) + err := collapsedEn.setHash() assert.Nil(t, err) assert.Equal(t, hash, collapsedEn.hash) } +func TestExtensionNode_setGivenHash(t *testing.T) { + t.Parallel() + + en := &extensionNode{baseNode: &baseNode{}} + expectedHash := []byte("node hash") + + en.setGivenHash(expectedHash) + assert.Equal(t, expectedHash, en.hash) +} + func TestExtensionNode_hashChildren(t *testing.T) { t.Parallel() - en, _ := getEnAndCollapsedEn() - marsh, hasher := getTestMarshAndHasher() + en, _ := getEnAndCollapsedEn() assert.Nil(t, en.child.getHash()) - err := en.hashChildren(marsh, hasher) + err := en.hashChildren() assert.Nil(t, err) - childHash, _ := encodeNodeAndGetHash(en.child, marsh, hasher) + childHash, _ := encodeNodeAndGetHash(en.child) assert.Equal(t, childHash, en.child.getHash()) } func TestExtensionNode_hashChildrenEmptyNode(t *testing.T) { t.Parallel() + en := &extensionNode{} - 
marsh, hasher := getTestMarshAndHasher() - err := en.hashChildren(marsh, hasher) + err := en.hashChildren() assert.Equal(t, ErrEmptyNode, err) } func TestExtensionNode_hashChildrenNilNode(t *testing.T) { t.Parallel() + var en *extensionNode - marsh, hasher := getTestMarshAndHasher() - err := en.hashChildren(marsh, hasher) + err := en.hashChildren() assert.Equal(t, ErrNilNode, err) } func TestExtensionNode_hashChildrenCollapsedNode(t *testing.T) { t.Parallel() + _, collapsedEn := getEnAndCollapsedEn() - marsh, hasher := getTestMarshAndHasher() - err := collapsedEn.hashChildren(marsh, hasher) + err := collapsedEn.hashChildren() assert.Nil(t, err) _, collapsedEn2 := getEnAndCollapsedEn() @@ -183,275 +199,279 @@ func TestExtensionNode_hashChildrenCollapsedNode(t *testing.T) { func TestExtensionNode_hashNode(t *testing.T) { t.Parallel() + _, collapsedEn := getEnAndCollapsedEn() - marsh, hasher := getTestMarshAndHasher() + expectedHash, _ := encodeNodeAndGetHash(collapsedEn) - expectedHash, _ := encodeNodeAndGetHash(collapsedEn, marsh, hasher) - hash, err := collapsedEn.hashNode(marsh, hasher) + hash, err := collapsedEn.hashNode() assert.Nil(t, err) assert.Equal(t, expectedHash, hash) } func TestExtensionNode_hashNodeEmptyNode(t *testing.T) { t.Parallel() + en := &extensionNode{} - marsh, hasher := getTestMarshAndHasher() - hash, err := en.hashNode(marsh, hasher) + hash, err := en.hashNode() assert.Equal(t, ErrEmptyNode, err) assert.Nil(t, hash) } func TestExtensionNode_hashNodeNilNode(t *testing.T) { t.Parallel() + var en *extensionNode - marsh, hasher := getTestMarshAndHasher() - hash, err := en.hashNode(marsh, hasher) + hash, err := en.hashNode() assert.Equal(t, ErrNilNode, err) assert.Nil(t, hash) } func TestExtensionNode_commit(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - en, collapsedEn := getEnAndCollapsedEn() - marsh, hasher := getTestMarshAndHasher() - hash, _ := encodeNodeAndGetHash(collapsedEn, marsh, hasher) - _ = en.setHash(marsh, hasher) + db := mock.NewMemDbMock() + en, collapsedEn := getEnAndCollapsedEn() + hash, _ := encodeNodeAndGetHash(collapsedEn) + _ = en.setHash() - err := en.commit(0, db, marsh, hasher) + err := en.commit(false, 0, db, db) assert.Nil(t, err) encNode, _ := db.Get(hash) - node, _ := decodeNode(encNode, marsh) + node, _ := decodeNode(encNode, en.marsh, en.hasher) - h1, _ := encodeNodeAndGetHash(collapsedEn, marsh, hasher) - h2, _ := encodeNodeAndGetHash(node, marsh, hasher) + h1, _ := encodeNodeAndGetHash(collapsedEn) + h2, _ := encodeNodeAndGetHash(node) assert.Equal(t, h1, h2) } func TestExtensionNode_commitEmptyNode(t *testing.T) { t.Parallel() + en := &extensionNode{} - db, _ := mock.NewMemDbMock() - marsh, hasher := getTestMarshAndHasher() - err := en.commit(0, db, marsh, hasher) + err := en.commit(false, 0, nil, nil) assert.Equal(t, ErrEmptyNode, err) } func TestExtensionNode_commitNilNode(t *testing.T) { t.Parallel() + var en *extensionNode - db, _ := mock.NewMemDbMock() - marsh, hasher := getTestMarshAndHasher() - err := en.commit(0, db, marsh, hasher) + err := en.commit(false, 0, nil, nil) assert.Equal(t, ErrNilNode, err) } func TestExtensionNode_commitCollapsedNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - _, collapsedEn := getEnAndCollapsedEn() - marsh, hasher := getTestMarshAndHasher() - hash, _ := encodeNodeAndGetHash(collapsedEn, marsh, hasher) - _ = collapsedEn.setHash(marsh, hasher) + db := mock.NewMemDbMock() + _, collapsedEn := getEnAndCollapsedEn() + hash, _ := encodeNodeAndGetHash(collapsedEn) + _ = 
collapsedEn.setHash() collapsedEn.dirty = true - err := collapsedEn.commit(0, db, marsh, hasher) + err := collapsedEn.commit(false, 0, db, db) assert.Nil(t, err) encNode, _ := db.Get(hash) - node, _ := decodeNode(encNode, marsh) + node, _ := decodeNode(encNode, collapsedEn.marsh, collapsedEn.hasher) collapsedEn.hash = nil - h1, _ := encodeNodeAndGetHash(collapsedEn, marsh, hasher) - h2, _ := encodeNodeAndGetHash(node, marsh, hasher) + h1, _ := encodeNodeAndGetHash(collapsedEn) + h2, _ := encodeNodeAndGetHash(node) assert.Equal(t, h1, h2) } func TestExtensionNode_getEncodedNode(t *testing.T) { t.Parallel() - en, _ := getEnAndCollapsedEn() - marsh, _ := getTestMarshAndHasher() - expectedEncodedNode, _ := marsh.Marshal(en) + en, _ := getEnAndCollapsedEn() + expectedEncodedNode, _ := en.marsh.Marshal(en) expectedEncodedNode = append(expectedEncodedNode, extension) - encNode, err := en.getEncodedNode(marsh) + encNode, err := en.getEncodedNode() assert.Nil(t, err) assert.Equal(t, expectedEncodedNode, encNode) } func TestExtensionNode_getEncodedNodeEmpty(t *testing.T) { t.Parallel() + en := &extensionNode{} - marsh, _ := getTestMarshAndHasher() - encNode, err := en.getEncodedNode(marsh) + encNode, err := en.getEncodedNode() assert.Equal(t, ErrEmptyNode, err) assert.Nil(t, encNode) } func TestExtensionNode_getEncodedNodeNil(t *testing.T) { t.Parallel() + var en *extensionNode - marsh, _ := getTestMarshAndHasher() - encNode, err := en.getEncodedNode(marsh) + encNode, err := en.getEncodedNode() assert.Equal(t, ErrNilNode, err) assert.Nil(t, encNode) } func TestExtensionNode_resolveCollapsed(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - en, collapsedEn := getEnAndCollapsedEn() - marsh, hasher := getTestMarshAndHasher() - _ = en.setHash(marsh, hasher) - _ = en.commit(0, db, marsh, hasher) - _, resolved := getBnAndCollapsedBn() + db := mock.NewMemDbMock() + en, collapsedEn := getEnAndCollapsedEn() + _ = en.setHash() + _ = en.commit(false, 0, db, db) + _, resolved := getBnAndCollapsedBn(en.marsh, en.hasher) - err := collapsedEn.resolveCollapsed(0, db, marsh) + err := collapsedEn.resolveCollapsed(0, db) assert.Nil(t, err) + assert.Equal(t, en.child.getHash(), collapsedEn.child.getHash()) - h1, _ := encodeNodeAndGetHash(resolved, marsh, hasher) - h2, _ := encodeNodeAndGetHash(collapsedEn.child, marsh, hasher) + h1, _ := encodeNodeAndGetHash(resolved) + h2, _ := encodeNodeAndGetHash(collapsedEn.child) assert.Equal(t, h1, h2) } func TestExtensionNode_resolveCollapsedEmptyNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + en := &extensionNode{} - marsh, _ := getTestMarshAndHasher() - err := en.resolveCollapsed(0, db, marsh) + err := en.resolveCollapsed(0, nil) assert.Equal(t, ErrEmptyNode, err) } -func TestExtensionNode_resolveCollapsedENilNode(t *testing.T) { +func TestExtensionNode_resolveCollapsedNilNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + var en *extensionNode - marsh, _ := getTestMarshAndHasher() - err := en.resolveCollapsed(2, db, marsh) + err := en.resolveCollapsed(2, nil) assert.Equal(t, ErrNilNode, err) } func TestExtensionNode_isCollapsed(t *testing.T) { t.Parallel() - en, collapsedEn := getEnAndCollapsedEn() + en, collapsedEn := getEnAndCollapsedEn() assert.True(t, collapsedEn.isCollapsed()) assert.False(t, en.isCollapsed()) - collapsedEn.child = newLeafNode([]byte("og"), []byte("dog")) + collapsedEn.child, _ = newLeafNode([]byte("og"), []byte("dog"), en.marsh, en.hasher) assert.False(t, collapsedEn.isCollapsed()) } func 
TestExtensionNode_tryGet(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + en, _ := getEnAndCollapsedEn() - marsh, _ := getTestMarshAndHasher() + dogBytes := []byte("dog") - key := []byte{100, 2, 100, 111, 103} - val, err := en.tryGet(key, db, marsh) - assert.Equal(t, []byte("dog"), val) + enKey := []byte{100} + bnKey := []byte{2} + lnKey := dogBytes + key := append(enKey, bnKey...) + key = append(key, lnKey...) + + val, err := en.tryGet(key, nil) + assert.Equal(t, dogBytes, val) assert.Nil(t, err) } func TestExtensionNode_tryGetEmptyKey(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - en, _ := getEnAndCollapsedEn() - marsh, _ := getTestMarshAndHasher() + en, _ := getEnAndCollapsedEn() var key []byte - val, err := en.tryGet(key, db, marsh) + + val, err := en.tryGet(key, nil) assert.Nil(t, err) assert.Nil(t, val) } func TestExtensionNode_tryGetWrongKey(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + en, _ := getEnAndCollapsedEn() - marsh, _ := getTestMarshAndHasher() + key := []byte("gdo") - key := []byte{103, 100, 111} - val, err := en.tryGet(key, db, marsh) + val, err := en.tryGet(key, nil) assert.Nil(t, err) assert.Nil(t, val) } func TestExtensionNode_tryGetCollapsedNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + + db := mock.NewMemDbMock() en, collapsedEn := getEnAndCollapsedEn() - marsh, hasher := getTestMarshAndHasher() - _ = en.setHash(marsh, hasher) - _ = en.commit(0, db, marsh, hasher) + _ = en.setHash() + _ = en.commit(false, 0, db, db) - key := []byte{100, 2, 100, 111, 103} - val, err := collapsedEn.tryGet(key, db, marsh) + enKey := []byte{100} + bnKey := []byte{2} + lnKey := []byte("dog") + key := append(enKey, bnKey...) + key = append(key, lnKey...) + + val, err := collapsedEn.tryGet(key, db) assert.Equal(t, []byte("dog"), val) assert.Nil(t, err) } func TestExtensionNode_tryGetEmptyNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + en := &extensionNode{} - marsh, _ := getTestMarshAndHasher() + key := []byte("dog") - key := []byte{100, 111, 103} - val, err := en.tryGet(key, db, marsh) + val, err := en.tryGet(key, nil) assert.Equal(t, ErrEmptyNode, err) assert.Nil(t, val) } func TestExtensionNode_tryGetNilNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + var en *extensionNode - marsh, _ := getTestMarshAndHasher() + key := []byte("dog") - key := []byte{100, 111, 103} - val, err := en.tryGet(key, db, marsh) + val, err := en.tryGet(key, nil) assert.Equal(t, ErrNilNode, err) assert.Nil(t, val) } func TestExtensionNode_getNext(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + en, _ := getEnAndCollapsedEn() - marsh, _ := getTestMarshAndHasher() - nextNode, _ := getBnAndCollapsedBn() - key := []byte{100, 2, 100, 111, 103} + nextNode, _ := getBnAndCollapsedBn(en.marsh, en.hasher) + + enKey := []byte{100} + bnKey := []byte{2} + lnKey := []byte("dog") + key := append(enKey, bnKey...) + key = append(key, lnKey...) - node, key, err := en.getNext(key, db, marsh) + node, newKey, err := en.getNext(key, nil) assert.Equal(t, nextNode, node) - assert.Equal(t, []byte{2, 100, 111, 103}, key) + assert.Equal(t, key[1:], newKey) assert.Nil(t, err) } func TestExtensionNode_getNextWrongKey(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + en, _ := getEnAndCollapsedEn() - marsh, _ := getTestMarshAndHasher() - key := []byte{2, 100, 111, 103} + bnKey := []byte{2} + lnKey := []byte("dog") + key := append(bnKey, lnKey...) 
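+	// the key skips the extension node's own prefix, so getNext should report ErrNodeNotFound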
- node, key, err := en.getNext(key, db, marsh) + node, key, err := en.getNext(key, nil) assert.Nil(t, node) assert.Nil(t, key) assert.Equal(t, ErrNodeNotFound, err) @@ -459,42 +479,112 @@ func TestExtensionNode_getNextWrongKey(t *testing.T) { func TestExtensionNode_insert(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + en, _ := getEnAndCollapsedEn() - node := newLeafNode([]byte{100, 15, 5, 6}, []byte("dogs")) - marsh, _ := getTestMarshAndHasher() + key := []byte{100, 15, 5, 6} + node, _ := newLeafNode(key, []byte("dogs"), en.marsh, en.hasher) - dirty, newNode, err := en.insert(node, db, marsh) + dirty, newNode, _, err := en.insert(node, nil) assert.True(t, dirty) assert.Nil(t, err) - val, _ := newNode.tryGet([]byte{100, 15, 5, 6}, db, marsh) + + val, _ := newNode.tryGet(key, nil) assert.Equal(t, []byte("dogs"), val) } func TestExtensionNode_insertCollapsedNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + + db := mock.NewMemDbMock() en, collapsedEn := getEnAndCollapsedEn() - node := newLeafNode([]byte{100, 15, 5, 6}, []byte("dogs")) - marsh, hasher := getTestMarshAndHasher() - _ = en.setHash(marsh, hasher) - _ = en.commit(0, db, marsh, hasher) + key := []byte{100, 15, 5, 6} + node, _ := newLeafNode(key, []byte("dogs"), en.marsh, en.hasher) + + _ = en.setHash() + _ = en.commit(false, 0, db, db) - dirty, newNode, err := collapsedEn.insert(node, db, marsh) + dirty, newNode, _, err := collapsedEn.insert(node, db) assert.True(t, dirty) assert.Nil(t, err) - val, _ := newNode.tryGet([]byte{100, 15, 5, 6}, db, marsh) + + val, _ := newNode.tryGet(key, db) assert.Equal(t, []byte("dogs"), val) } +func TestExtensionNode_insertInStoredEnSameKey(t *testing.T) { + t.Parallel() + + db := mock.NewMemDbMock() + en, _ := getEnAndCollapsedEn() + enKey := []byte{100} + key := append(enKey, []byte{11, 12}...) 
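+	// the leaf key starts with the stored extension node's prefix, so the insert descends
+	// into the branch child and reports both the branch's and the extension's previous
+	// hashes as old hashes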
+ node, _ := newLeafNode(key, []byte("dogs"), en.marsh, en.hasher) + + _ = en.commit(false, 0, db, db) + enHash := en.getHash() + bn, _, _ := en.getNext(enKey, db) + bnHash := bn.getHash() + expectedHashes := [][]byte{bnHash, enHash} + + dirty, _, oldHashes, err := en.insert(node, db) + assert.True(t, dirty) + assert.Nil(t, err) + assert.Equal(t, expectedHashes, oldHashes) +} + +func TestExtensionNode_insertInStoredEnDifferentKey(t *testing.T) { + t.Parallel() + + db := mock.NewMemDbMock() + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + enKey := []byte{1} + en, _ := newExtensionNode(enKey, bn, bn.marsh, bn.hasher) + nodeKey := []byte{11, 12} + node, _ := newLeafNode(nodeKey, []byte("dogs"), bn.marsh, bn.hasher) + + _ = en.commit(false, 0, db, db) + expectedHashes := [][]byte{en.getHash()} + + dirty, _, oldHashes, err := en.insert(node, db) + assert.True(t, dirty) + assert.Nil(t, err) + assert.Equal(t, expectedHashes, oldHashes) +} + +func TestExtensionNode_insertInDirtyEnSameKey(t *testing.T) { + t.Parallel() + + en, _ := getEnAndCollapsedEn() + nodeKey := []byte{100, 11, 12} + node, _ := newLeafNode(nodeKey, []byte("dogs"), en.marsh, en.hasher) + + dirty, _, oldHashes, err := en.insert(node, nil) + assert.True(t, dirty) + assert.Nil(t, err) + assert.Equal(t, [][]byte{}, oldHashes) +} + +func TestExtensionNode_insertInDirtyEnDifferentKey(t *testing.T) { + t.Parallel() + + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) + enKey := []byte{1} + en, _ := newExtensionNode(enKey, bn, bn.marsh, bn.hasher) + nodeKey := []byte{11, 12} + node, _ := newLeafNode(nodeKey, []byte("dogs"), bn.marsh, bn.hasher) + + dirty, _, oldHashes, err := en.insert(node, nil) + assert.True(t, dirty) + assert.Nil(t, err) + assert.Equal(t, [][]byte{}, oldHashes) +} + func TestExtensionNode_insertInNilNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + var en *extensionNode - node := newLeafNode([]byte{0, 2, 3}, []byte("dogs")) - marsh, _ := getTestMarshAndHasher() - dirty, newNode, err := en.insert(node, db, marsh) + dirty, newNode, _, err := en.insert(&leafNode{}, nil) assert.False(t, dirty) assert.Equal(t, ErrNilNode, err) assert.Nil(t, newNode) @@ -502,27 +592,67 @@ func TestExtensionNode_insertInNilNode(t *testing.T) { func TestExtensionNode_delete(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + en, _ := getEnAndCollapsedEn() - marsh, _ := getTestMarshAndHasher() + dogBytes := []byte("dog") - val, _ := en.tryGet([]byte{100, 2, 100, 111, 103}, db, marsh) - assert.Equal(t, []byte("dog"), val) + enKey := []byte{100} + bnKey := []byte{2} + lnKey := dogBytes + key := append(enKey, bnKey...) + key = append(key, lnKey...) + + val, _ := en.tryGet(key, nil) + assert.Equal(t, dogBytes, val) - dirty, _, err := en.delete([]byte{100, 2, 100, 111, 103}, db, marsh) + dirty, _, _, err := en.delete(key, nil) assert.True(t, dirty) assert.Nil(t, err) - val, _ = en.tryGet([]byte{100, 2, 100, 111, 103}, db, marsh) + val, _ = en.tryGet(key, nil) assert.Nil(t, val) } +func TestExtensionNode_deleteFromStoredEn(t *testing.T) { + t.Parallel() + + db := mock.NewMemDbMock() + en, _ := getEnAndCollapsedEn() + enKey := []byte{100} + bnKey := []byte{2} + lnKey := []byte("dog") + key := append(enKey, bnKey...) + key = append(key, lnKey...) 
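+	// remember the full leaf path: the getNext calls below rebind key to the remaining
+	// suffix at each level, while delete needs the complete path from the root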
+ lnPathKey := key + + _ = en.commit(false, 0, db, db) + bn, key, _ := en.getNext(key, db) + ln, _, _ := bn.getNext(key, db) + expectedHashes := [][]byte{ln.getHash(), bn.getHash(), en.getHash()} + + dirty, _, oldHashes, err := en.delete(lnPathKey, db) + assert.True(t, dirty) + assert.Nil(t, err) + assert.Equal(t, expectedHashes, oldHashes) +} + +func TestExtensionNode_deleteFromDirtyEn(t *testing.T) { + t.Parallel() + + en, _ := getEnAndCollapsedEn() + lnKey := []byte{100, 2, 100, 111, 103} + + dirty, _, oldHashes, err := en.delete(lnKey, nil) + assert.True(t, dirty) + assert.Nil(t, err) + assert.Equal(t, [][]byte{}, oldHashes) +} + func TestExtendedNode_deleteEmptyNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + en := &extensionNode{} - marsh, _ := getTestMarshAndHasher() - dirty, newNode, err := en.delete([]byte{100, 111, 103}, db, marsh) + dirty, newNode, _, err := en.delete([]byte("dog"), nil) assert.False(t, dirty) assert.Equal(t, ErrEmptyNode, err) assert.Nil(t, newNode) @@ -530,11 +660,10 @@ func TestExtendedNode_deleteEmptyNode(t *testing.T) { func TestExtensionNode_deleteNilNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + var en *extensionNode - marsh, _ := getTestMarshAndHasher() - dirty, newNode, err := en.delete([]byte{100, 111, 103}, db, marsh) + dirty, newNode, _, err := en.delete([]byte("dog"), nil) assert.False(t, dirty) assert.Equal(t, ErrNilNode, err) assert.Nil(t, newNode) @@ -542,11 +671,10 @@ func TestExtensionNode_deleteNilNode(t *testing.T) { func TestExtensionNode_deleteEmptykey(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + en, _ := getEnAndCollapsedEn() - marsh, _ := getTestMarshAndHasher() - dirty, newNode, err := en.delete([]byte{}, db, marsh) + dirty, newNode, _, err := en.delete([]byte{}, nil) assert.False(t, dirty) assert.Equal(t, ErrValueTooShort, err) assert.Nil(t, newNode) @@ -554,32 +682,46 @@ func TestExtensionNode_deleteEmptykey(t *testing.T) { func TestExtensionNode_deleteCollapsedNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + + db := mock.NewMemDbMock() en, collapsedEn := getEnAndCollapsedEn() - marsh, hasher := getTestMarshAndHasher() - _ = en.setHash(marsh, hasher) - _ = en.commit(0, db, marsh, hasher) + _ = en.setHash() + _ = en.commit(false, 0, db, db) + + enKey := []byte{100} + bnKey := []byte{2} + lnKey := []byte("dog") + key := append(enKey, bnKey...) + key = append(key, lnKey...) 
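+	// enKey + bnKey + lnKey is the same byte sequence as the former literal {100, 2, 100, 111, 103}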
- val, _ := en.tryGet([]byte{100, 2, 100, 111, 103}, db, marsh) + val, _ := en.tryGet(key, db) assert.Equal(t, []byte("dog"), val) - dirty, newNode, err := collapsedEn.delete([]byte{100, 2, 100, 111, 103}, db, marsh) + dirty, newNode, _, err := collapsedEn.delete(key, db) assert.True(t, dirty) assert.Nil(t, err) - val, _ = newNode.tryGet([]byte{100, 2, 100, 111, 103}, db, marsh) + val, _ = newNode.tryGet(key, db) assert.Nil(t, val) } func TestExtensionNode_reduceNode(t *testing.T) { t.Parallel() - en := &extensionNode{CollapsedEn: protobuf.CollapsedEn{Key: []byte{100, 111, 103}}} - expected := &extensionNode{CollapsedEn: protobuf.CollapsedEn{Key: []byte{2, 100, 111, 103}}, dirty: true} - node := en.reduceNode(2) + + marsh, hasher := getTestMarshAndHasher() + en, _ := newExtensionNode([]byte{100, 111, 103}, nil, marsh, hasher) + + expected := &extensionNode{CollapsedEn: protobuf.CollapsedEn{Key: []byte{2, 100, 111, 103}}, baseNode: &baseNode{dirty: true}} + expected.marsh = en.marsh + expected.hasher = en.hasher + + node, err := en.reduceNode(2) assert.Equal(t, expected, node) + assert.Nil(t, err) } func TestExtensionNode_clone(t *testing.T) { t.Parallel() + en, _ := getEnAndCollapsedEn() clone := en.clone() assert.False(t, en == clone) @@ -588,6 +730,7 @@ func TestExtensionNode_clone(t *testing.T) { func TestExtensionNode_isEmptyOrNil(t *testing.T) { t.Parallel() + en := &extensionNode{} assert.Equal(t, ErrEmptyNode, en.isEmptyOrNil()) @@ -600,12 +743,12 @@ func TestExtensionNode_isEmptyOrNil(t *testing.T) { func TestExtensionNode_deepCloneNilHashShouldWork(t *testing.T) { t.Parallel() - en := &extensionNode{} + en := &extensionNode{baseNode: &baseNode{}} en.dirty = true en.hash = nil en.EncodedChild = getRandomByteSlice() en.Key = getRandomByteSlice() - en.child = &leafNode{} + en.child = &leafNode{baseNode: &baseNode{}} cloned := en.deepClone().(*extensionNode) @@ -615,12 +758,12 @@ func TestExtensionNode_deepCloneNilHashShouldWork(t *testing.T) { func TestExtensionNode_deepCloneNilEncodedChildShouldWork(t *testing.T) { t.Parallel() - en := &extensionNode{} + en := &extensionNode{baseNode: &baseNode{}} en.dirty = true en.hash = getRandomByteSlice() en.EncodedChild = nil en.Key = getRandomByteSlice() - en.child = &leafNode{} + en.child = &leafNode{baseNode: &baseNode{}} cloned := en.deepClone().(*extensionNode) @@ -630,22 +773,105 @@ func TestExtensionNode_deepCloneNilEncodedChildShouldWork(t *testing.T) { func TestExtensionNode_deepCloneNilKeyShouldWork(t *testing.T) { t.Parallel() - en := &extensionNode{} + en := &extensionNode{baseNode: &baseNode{}} en.dirty = true en.hash = getRandomByteSlice() en.EncodedChild = getRandomByteSlice() en.Key = nil - en.child = &leafNode{} + en.child = &leafNode{baseNode: &baseNode{}} cloned := en.deepClone().(*extensionNode) testSameExtensionNodeContent(t, en, cloned) } +func TestExtensionNode_getChildren(t *testing.T) { + t.Parallel() + + en, _ := getEnAndCollapsedEn() + + children, err := en.getChildren(nil) + assert.Nil(t, err) + assert.Equal(t, 1, len(children)) +} + +func TestExtensionNode_getChildrenCollapsedEn(t *testing.T) { + t.Parallel() + + db := mock.NewMemDbMock() + en, collapsedEn := getEnAndCollapsedEn() + _ = en.commit(true, 0, db, db) + + children, err := collapsedEn.getChildren(db) + assert.Nil(t, err) + assert.Equal(t, 1, len(children)) +} + +func TestExtensionNode_isValid(t *testing.T) { + t.Parallel() + + en, _ := getEnAndCollapsedEn() + assert.True(t, en.isValid()) + + en.child = nil + assert.False(t, en.isValid()) +} + +func 
TestExtensionNode_setDirty(t *testing.T) { + t.Parallel() + + en := &extensionNode{baseNode: &baseNode{}} + en.setDirty(true) + + assert.True(t, en.dirty) +} + +func TestExtensionNode_loadChildren(t *testing.T) { + t.Parallel() + + marsh, hasher := getTestMarshAndHasher() + tr := initTrie() + nodes, hashes := getEncodedTrieNodesAndHashes(tr) + nodesCacher, _ := lrucache.NewCache(100) + resolver := &mock.TrieNodesResolverStub{ + RequestDataFromHashCalled: func(hash []byte) error { + for i := range nodes { + node, _ := NewInterceptedTrieNode(nodes[i], marsh, hasher) + nodesCacher.Put(node.hash, node) + } + return nil + }, + } + syncer, _ := NewTrieSyncer(resolver, nodesCacher, tr, time.Second) + syncer.interceptedNodes.RegisterHandler(func(key []byte) { + syncer.chRcvTrieNodes <- true + }) + + enHashPosition := 0 + enKey := []byte{6, 4, 6, 15, 6} + childPosition := 1 + childHash := hashes[childPosition] + en := &extensionNode{ + CollapsedEn: protobuf.CollapsedEn{ + Key: enKey, + EncodedChild: childHash, + }, + baseNode: &baseNode{ + hash: hashes[enHashPosition], + }, + } + + err := en.loadChildren(syncer) + assert.Nil(t, err) + assert.NotNil(t, en.child) + + assert.Equal(t, 5, nodesCacher.Len()) +} + func TestExtensionNode_deepCloneNilChildShouldWork(t *testing.T) { t.Parallel() - en := &extensionNode{} + en := &extensionNode{baseNode: &baseNode{}} en.dirty = true en.hash = getRandomByteSlice() en.EncodedChild = getRandomByteSlice() @@ -660,12 +886,12 @@ func TestExtensionNode_deepCloneNilChildShouldWork(t *testing.T) { func TestExtensionNode_deepCloneShouldWork(t *testing.T) { t.Parallel() - en := &extensionNode{} + en := &extensionNode{baseNode: &baseNode{}} en.dirty = true en.hash = getRandomByteSlice() en.EncodedChild = getRandomByteSlice() en.Key = getRandomByteSlice() - en.child = &leafNode{} + en.child = &leafNode{baseNode: &baseNode{}} cloned := en.deepClone().(*extensionNode) diff --git a/data/trie/factory/trieCreator.go b/data/trie/factory/trieCreator.go new file mode 100644 index 00000000000..ba83c2d6bea --- /dev/null +++ b/data/trie/factory/trieCreator.go @@ -0,0 +1,105 @@ +package factory + +import ( + "path" + "path/filepath" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/trie" + "github.com/ElrondNetwork/elrond-go/data/trie/evictionWaitingList" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/logger" + "github.com/ElrondNetwork/elrond-go/marshal" + storageFactory "github.com/ElrondNetwork/elrond-go/storage/factory" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" +) + +type trieCreator struct { + trieStorage data.StorageManager + msh marshal.Marshalizer + hsh hashing.Hasher +} + +var log = logger.GetOrCreate("trie") + +// NewTrieFactory creates a new trie factory +func NewTrieFactory( + args TrieFactoryArgs, +) (*trieCreator, error) { + if check.IfNil(args.Marshalizer) { + return nil, trie.ErrNilMarshalizer + } + if check.IfNil(args.Hasher) { + return nil, trie.ErrNilHasher + } + if check.IfNil(args.PathManager) { + return nil, trie.ErrNilPathManager + } + + trieStoragePath, mainDb := path.Split(args.PathManager.PathForStatic(args.ShardId, args.Cfg.DB.FilePath)) + + dbConfig := storageFactory.GetDBFromConfig(args.Cfg.DB) + dbConfig.FilePath = path.Join(trieStoragePath, mainDb) + accountsTrieStorage, err := storageUnit.NewStorageUnitFromConf( + storageFactory.GetCacherFromConfig(args.Cfg.Cache), + dbConfig, + 
storageFactory.GetBloomFromConfig(args.Cfg.Bloom), + ) + if err != nil { + return nil, err + } + + log.Trace("trie pruning status", "enabled", args.PruningEnabled) + if !args.PruningEnabled { + trieStorage, err := trie.NewTrieStorageManagerWithoutPruning(accountsTrieStorage) + if err != nil { + return nil, err + } + + return &trieCreator{ + trieStorage: trieStorage, + msh: args.Marshalizer, + hsh: args.Hasher, + }, nil + } + + evictionDb, err := storageUnit.NewDB( + storageUnit.DBType(args.EvictionWaitingListCfg.DB.Type), + filepath.Join(trieStoragePath, args.EvictionWaitingListCfg.DB.FilePath), + args.EvictionWaitingListCfg.DB.MaxBatchSize, + args.EvictionWaitingListCfg.DB.BatchDelaySeconds, + args.EvictionWaitingListCfg.DB.MaxOpenFiles, + ) + if err != nil { + return nil, err + } + + ewl, err := evictionWaitingList.NewEvictionWaitingList(args.EvictionWaitingListCfg.Size, evictionDb, args.Marshalizer) + if err != nil { + return nil, err + } + + args.SnapshotDbCfg.FilePath = filepath.Join(trieStoragePath, args.SnapshotDbCfg.FilePath) + + trieStorage, err := trie.NewTrieStorageManager(accountsTrieStorage, &args.SnapshotDbCfg, ewl) + if err != nil { + return nil, err + } + + return &trieCreator{ + trieStorage: trieStorage, + msh: args.Marshalizer, + hsh: args.Hasher, + }, nil +} + +// Create creates a new trie +func (tc *trieCreator) Create() (data.Trie, error) { + return trie.NewTrie(tc.trieStorage, tc.msh, tc.hsh) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (tc *trieCreator) IsInterfaceNil() bool { + return tc == nil +} diff --git a/data/trie/factory/trieCreator_test.go b/data/trie/factory/trieCreator_test.go new file mode 100644 index 00000000000..d8ebd14abab --- /dev/null +++ b/data/trie/factory/trieCreator_test.go @@ -0,0 +1,3 @@ +package factory + +// TODO add tests diff --git a/data/trie/factory/trieFactoryArgs.go b/data/trie/factory/trieFactoryArgs.go new file mode 100644 index 00000000000..724506e3930 --- /dev/null +++ b/data/trie/factory/trieFactoryArgs.go @@ -0,0 +1,20 @@ +package factory + +import ( + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/storage" +) + +// TrieFactoryArgs holds arguments for creating a trie factory +type TrieFactoryArgs struct { + Cfg config.StorageConfig + EvictionWaitingListCfg config.EvictionWaitingListConfig + SnapshotDbCfg config.DBConfig + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + PathManager storage.PathManagerHandler + ShardId string + PruningEnabled bool +} diff --git a/data/trie/interceptedNode.go b/data/trie/interceptedNode.go new file mode 100644 index 00000000000..60d4a09bb58 --- /dev/null +++ b/data/trie/interceptedNode.go @@ -0,0 +1,90 @@ +package trie + +import ( + "sync" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" +) + +// InterceptedTrieNode implements intercepted data interface and is used when trie nodes are intercepted +type InterceptedTrieNode struct { + node node + encNode []byte + hash []byte + mutex sync.Mutex +} + +// NewInterceptedTrieNode creates a new instance of InterceptedTrieNode +func NewInterceptedTrieNode( + buff []byte, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, +) (*InterceptedTrieNode, error) { + if len(buff) == 0 { + return nil, ErrValueTooShort + } + if check.IfNil(marshalizer) { + return nil, ErrNilMarshalizer + } + 
if check.IfNil(hasher) { + return nil, ErrNilHasher + } + + n, err := decodeNode(buff, marshalizer, hasher) + if err != nil { + return nil, err + } + n.setDirty(true) + + err = n.setHash() + if err != nil { + return nil, err + } + + return &InterceptedTrieNode{ + node: n, + encNode: buff, + hash: n.getHash(), + }, nil +} + +// CheckValidity checks if the intercepted data is valid +func (inTn *InterceptedTrieNode) CheckValidity() error { + if inTn.node.isValid() { + return nil + } + return ErrInvalidNode +} + +// IsForCurrentShard checks if the intercepted data is for the current shard +func (inTn *InterceptedTrieNode) IsForCurrentShard() bool { + return true +} + +// Hash returns the hash of the intercepted node +func (inTn *InterceptedTrieNode) Hash() []byte { + inTn.mutex.Lock() + defer inTn.mutex.Unlock() + + return inTn.hash +} + +// IsInterfaceNil returns true if there is no value under the interface +func (inTn *InterceptedTrieNode) IsInterfaceNil() bool { + return inTn == nil +} + +// EncodedNode returns the intercepted encoded node +func (inTn *InterceptedTrieNode) EncodedNode() []byte { + return inTn.encNode +} + +// CreateEndOfProcessingTriggerNode changes the hash of the current node by appending the hash to the current hash. +// This construction will be used to trigger the end of processing for all of the received data +func (inTn *InterceptedTrieNode) CreateEndOfProcessingTriggerNode() { + inTn.mutex.Lock() + inTn.hash = append(inTn.hash, inTn.hash...) + inTn.mutex.Unlock() +} diff --git a/data/trie/interceptedNode_test.go b/data/trie/interceptedNode_test.go new file mode 100644 index 00000000000..eee6239a724 --- /dev/null +++ b/data/trie/interceptedNode_test.go @@ -0,0 +1,108 @@ +package trie_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/mock" + "github.com/ElrondNetwork/elrond-go/data/trie" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/stretchr/testify/assert" +) + +func getDefaultInterceptedTrieNodeParameters() ([]byte, marshal.Marshalizer, hashing.Hasher) { + tr := initTrie() + nodes, _ := getEncodedTrieNodesAndHashes(tr) + + return nodes[0], &mock.ProtobufMarshalizerMock{}, &mock.KeccakMock{} +} + +func getEncodedTrieNodesAndHashes(tr data.Trie) ([][]byte, [][]byte) { + it, _ := trie.NewIterator(tr) + encNode, _ := it.MarshalizedNode() + + nodes := make([][]byte, 0) + nodes = append(nodes, encNode) + + hashes := make([][]byte, 0) + hash, _ := it.GetHash() + hashes = append(hashes, hash) + + for it.HasNext() { + _ = it.Next() + encNode, _ = it.MarshalizedNode() + + nodes = append(nodes, encNode) + hash, _ = it.GetHash() + hashes = append(hashes, hash) + } + + return nodes, hashes +} + +func TestNewInterceptedTrieNode_EmptyBufferShouldFail(t *testing.T) { + t.Parallel() + + _, marsh, hasher := getDefaultInterceptedTrieNodeParameters() + interceptedNode, err := trie.NewInterceptedTrieNode([]byte{}, marsh, hasher) + assert.Nil(t, interceptedNode) + assert.Equal(t, trie.ErrValueTooShort, err) +} + +func TestNewInterceptedTrieNode_NilMarshalizerShouldFail(t *testing.T) { + t.Parallel() + + buff, _, hasher := getDefaultInterceptedTrieNodeParameters() + interceptedNode, err := trie.NewInterceptedTrieNode(buff, nil, hasher) + assert.Nil(t, interceptedNode) + assert.Equal(t, trie.ErrNilMarshalizer, err) +} + +func TestNewInterceptedTrieNode_NilHasherShouldFail(t *testing.T) { + t.Parallel() + + buff, marsh, _ := 
getDefaultInterceptedTrieNodeParameters() + interceptedNode, err := trie.NewInterceptedTrieNode(buff, marsh, nil) + assert.Nil(t, interceptedNode) + assert.Equal(t, trie.ErrNilHasher, err) +} + +func TestNewInterceptedTrieNode_OkParametersShouldWork(t *testing.T) { + t.Parallel() + + interceptedNode, err := trie.NewInterceptedTrieNode(getDefaultInterceptedTrieNodeParameters()) + assert.NotNil(t, interceptedNode) + assert.Nil(t, err) +} + +func TestInterceptedTrieNode_CheckValidity(t *testing.T) { + t.Parallel() + + interceptedNode, _ := trie.NewInterceptedTrieNode(getDefaultInterceptedTrieNodeParameters()) + + err := interceptedNode.CheckValidity() + assert.Nil(t, err) +} + +func TestInterceptedTrieNode_Hash(t *testing.T) { + t.Parallel() + + interceptedNode, _ := trie.NewInterceptedTrieNode(getDefaultInterceptedTrieNodeParameters()) + tr := initTrie() + _, hashes := getEncodedTrieNodesAndHashes(tr) + + hash := interceptedNode.Hash() + assert.Equal(t, hashes[0], hash) +} + +func TestInterceptedTrieNode_EncodedNode(t *testing.T) { + t.Parallel() + + interceptedNode, _ := trie.NewInterceptedTrieNode(getDefaultInterceptedTrieNodeParameters()) + tr := initTrie() + nodes, _ := getEncodedTrieNodesAndHashes(tr) + + encNode := interceptedNode.EncodedNode() + assert.Equal(t, nodes[0], encNode) +} diff --git a/data/trie/interface.go b/data/trie/interface.go new file mode 100644 index 00000000000..0d447bb720e --- /dev/null +++ b/data/trie/interface.go @@ -0,0 +1,54 @@ +package trie + +import ( + "io" + "sync" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" +) + +type node interface { + getHash() []byte + setHash() error + setGivenHash([]byte) + setHashConcurrent(wg *sync.WaitGroup, c chan error) + setRootHash() error + getCollapsed() (node, error) // a collapsed node is a node that instead of the children holds the children hashes + isCollapsed() bool + isPosCollapsed(pos int) bool + isDirty() bool + getEncodedNode() ([]byte, error) + commit(force bool, level byte, originDb data.DBWriteCacher, targetDb data.DBWriteCacher) error + resolveCollapsed(pos byte, db data.DBWriteCacher) error + hashNode() ([]byte, error) + hashChildren() error + tryGet(key []byte, db data.DBWriteCacher) ([]byte, error) + getNext(key []byte, db data.DBWriteCacher) (node, []byte, error) + insert(n *leafNode, db data.DBWriteCacher) (bool, node, [][]byte, error) + delete(key []byte, db data.DBWriteCacher) (bool, node, [][]byte, error) + reduceNode(pos int) (node, error) + isEmptyOrNil() error + print(writer io.Writer, index int) + deepClone() node + getDirtyHashes() ([][]byte, error) + getChildren(db data.DBWriteCacher) ([]node, error) + isValid() bool + setDirty(bool) + loadChildren(*trieSyncer) error + getAllLeaves(map[string][]byte, []byte, data.DBWriteCacher, marshal.Marshalizer) error + + getMarshalizer() marshal.Marshalizer + setMarshalizer(marshal.Marshalizer) + getHasher() hashing.Hasher + setHasher(hashing.Hasher) +} + +type snapshotsBuffer interface { + add([]byte, bool) + len() int + removeFirst() + getFirst() *snapshotsQueueEntry + clone() snapshotsBuffer +} diff --git a/data/trie/iterator.go b/data/trie/iterator.go new file mode 100644 index 00000000000..4ba697ca518 --- /dev/null +++ b/data/trie/iterator.go @@ -0,0 +1,80 @@ +package trie + +import ( + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" +) + +type iterator struct { + currentNode node + nextNodes []node + db 
data.DBWriteCacher +} + +// NewIterator creates a new instance of trie iterator +func NewIterator(trie data.Trie) (*iterator, error) { + if check.IfNil(trie) { + return nil, ErrNilTrie + } + + pmt, ok := trie.(*patriciaMerkleTrie) + if !ok { + return nil, ErrWrongTypeAssertion + } + + nextNodes, err := pmt.root.getChildren(trie.Database()) + if err != nil { + return nil, err + } + + return &iterator{ + currentNode: pmt.root, + nextNodes: nextNodes, + db: trie.Database(), + }, nil +} + +// HasNext returns true if there is a next node +func (it *iterator) HasNext() bool { + return len(it.nextNodes) > 0 +} + +// Next moves the iterator to the next node +func (it *iterator) Next() error { + n := it.nextNodes[0] + + err := n.isEmptyOrNil() + if err != nil { + return ErrNilNode + } + + it.currentNode = n + nextChildren, err := it.currentNode.getChildren(it.db) + if err != nil { + return err + } + + it.nextNodes = append(it.nextNodes, nextChildren...) + it.nextNodes = it.nextNodes[1:] + return nil +} + +// MarshalizedNode marshalizes the current node, and then returns the serialized node +func (it *iterator) MarshalizedNode() ([]byte, error) { + err := it.currentNode.setHash() + if err != nil { + return nil, err + } + + return it.currentNode.getEncodedNode() +} + +// GetHash returns the current node hash +func (it *iterator) GetHash() ([]byte, error) { + err := it.currentNode.setHash() + if err != nil { + return nil, err + } + + return it.currentNode.getHash(), nil +} diff --git a/data/trie/iterator_test.go b/data/trie/iterator_test.go new file mode 100644 index 00000000000..2fe15b63762 --- /dev/null +++ b/data/trie/iterator_test.go @@ -0,0 +1,81 @@ +package trie_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/trie" + "github.com/stretchr/testify/assert" +) + +func TestNewIterator(t *testing.T) { + t.Parallel() + + tr := initTrie() + + it, err := trie.NewIterator(tr) + assert.Nil(t, err) + assert.NotNil(t, it) +} + +func TestNewIteratorNilTrieShouldErr(t *testing.T) { + t.Parallel() + + var tr data.Trie + + it, err := trie.NewIterator(tr) + assert.Nil(t, it) + assert.Equal(t, trie.ErrNilTrie, err) +} + +func TestIterator_HasNext(t *testing.T) { + t.Parallel() + + tr := emptyTrie() + _ = tr.Update([]byte("dog"), []byte("dog")) + it, _ := trie.NewIterator(tr) + assert.False(t, it.HasNext()) + + _ = tr.Update([]byte("doe"), []byte("doe")) + it, _ = trie.NewIterator(tr) + assert.True(t, it.HasNext()) +} + +func TestIterator_Next(t *testing.T) { + t.Parallel() + + tr := initTrie() + + it, _ := trie.NewIterator(tr) + for it.HasNext() { + err := it.Next() + assert.Nil(t, err) + } +} + +func TestIterator_GetMarshalizedNode(t *testing.T) { + t.Parallel() + + tr := initTrie() + it, _ := trie.NewIterator(tr) + + encNode, err := it.MarshalizedNode() + assert.Nil(t, err) + assert.NotEqual(t, 0, len(encNode)) + + extensionNodeIdentifier := uint8(0) + lastByte := len(encNode) - 1 + assert.Equal(t, extensionNodeIdentifier, encNode[lastByte]) +} + +func TestIterator_GetHash(t *testing.T) { + t.Parallel() + + tr := initTrie() + rootHash, _ := tr.Root() + it, _ := trie.NewIterator(tr) + + hash, err := it.GetHash() + assert.Nil(t, err) + assert.Equal(t, rootHash, hash) +} diff --git a/data/trie/leafNode.go b/data/trie/leafNode.go index 4f0db557eb9..da1286f38dd 100644 --- a/data/trie/leafNode.go +++ b/data/trie/leafNode.go @@ -53,30 +53,60 @@ func leafNodeCapnToGo(src capnp.LeafNodeCapn, dest *leafNode) *leafNode { return dest } -func 
newLeafNode(key, value []byte) *leafNode { +func newLeafNode(key, value []byte, marshalizer marshal.Marshalizer, hasher hashing.Hasher) (*leafNode, error) { + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, ErrNilMarshalizer + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, ErrNilHasher + } + return &leafNode{ CollapsedLn: protobuf.CollapsedLn{ Key: key, Value: value, }, - hash: nil, - dirty: true, - } + baseNode: &baseNode{ + dirty: true, + marsh: marshalizer, + hasher: hasher, + }, + }, nil } func (ln *leafNode) getHash() []byte { return ln.hash } +func (ln *leafNode) setGivenHash(hash []byte) { + ln.hash = hash +} + func (ln *leafNode) isDirty() bool { return ln.dirty } -func (ln *leafNode) getCollapsed(marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) { +func (ln *leafNode) getMarshalizer() marshal.Marshalizer { + return ln.marsh +} + +func (ln *leafNode) setMarshalizer(marshalizer marshal.Marshalizer) { + ln.marsh = marshalizer +} + +func (ln *leafNode) getHasher() hashing.Hasher { + return ln.hasher +} + +func (ln *leafNode) setHasher(hasher hashing.Hasher) { + ln.hasher = hasher +} + +func (ln *leafNode) getCollapsed() (node, error) { return ln, nil } -func (ln *leafNode) setHash(marshalizer marshal.Marshalizer, hasher hashing.Hasher) error { +func (ln *leafNode) setHash() error { err := ln.isEmptyOrNil() if err != nil { return err @@ -84,7 +114,7 @@ func (ln *leafNode) setHash(marshalizer marshal.Marshalizer, hasher hashing.Hash if ln.getHash() != nil { return nil } - hash, err := hashChildrenAndNode(ln, marshalizer, hasher) + hash, err := hashChildrenAndNode(ln) if err != nil { return err } @@ -92,48 +122,51 @@ func (ln *leafNode) setHash(marshalizer marshal.Marshalizer, hasher hashing.Hash return nil } -func (ln *leafNode) setHashConcurrent(marshalizer marshal.Marshalizer, hasher hashing.Hasher, wg *sync.WaitGroup, c chan error) { - err := ln.setHash(marshalizer, hasher) +func (ln *leafNode) setHashConcurrent(wg *sync.WaitGroup, c chan error) { + err := ln.setHash() if err != nil { c <- err } wg.Done() } -func (ln *leafNode) setRootHash(marshalizer marshal.Marshalizer, hasher hashing.Hasher) error { - return ln.setHash(marshalizer, hasher) +func (ln *leafNode) setRootHash() error { + return ln.setHash() } -func (ln *leafNode) hashChildren(marshalizer marshal.Marshalizer, hasher hashing.Hasher) error { +func (ln *leafNode) hashChildren() error { return nil } -func (ln *leafNode) hashNode(marshalizer marshal.Marshalizer, hasher hashing.Hasher) ([]byte, error) { +func (ln *leafNode) hashNode() ([]byte, error) { err := ln.isEmptyOrNil() if err != nil { return nil, err } - return encodeNodeAndGetHash(ln, marshalizer, hasher) + return encodeNodeAndGetHash(ln) } -func (ln *leafNode) commit(level byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer, hasher hashing.Hasher) error { +func (ln *leafNode) commit(force bool, level byte, originDb data.DBWriteCacher, targetDb data.DBWriteCacher) error { err := ln.isEmptyOrNil() if err != nil { return err } - if !ln.dirty { + + shouldNotCommit := !ln.dirty && !force + if shouldNotCommit { return nil } + ln.dirty = false - return encodeNodeAndCommitToDB(ln, db, marshalizer, hasher) + return encodeNodeAndCommitToDB(ln, targetDb) } -func (ln *leafNode) getEncodedNode(marshalizer marshal.Marshalizer) ([]byte, error) { +func (ln *leafNode) getEncodedNode() ([]byte, error) { err := ln.isEmptyOrNil() if err != nil { return nil, err } - marshaledNode, err := marshalizer.Marshal(ln) + 
marshaledNode, err := ln.marsh.Marshal(ln) if err != nil { return nil, err } @@ -141,7 +174,7 @@ func (ln *leafNode) getEncodedNode(marshalizer marshal.Marshalizer) ([]byte, err return marshaledNode, nil } -func (ln *leafNode) resolveCollapsed(pos byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer) error { +func (ln *leafNode) resolveCollapsed(pos byte, db data.DBWriteCacher) error { return nil } @@ -153,7 +186,7 @@ func (ln *leafNode) isPosCollapsed(pos int) bool { return false } -func (ln *leafNode) tryGet(key []byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer) (value []byte, err error) { +func (ln *leafNode) tryGet(key []byte, db data.DBWriteCacher) (value []byte, err error) { err = ln.isEmptyOrNil() if err != nil { return nil, err @@ -165,7 +198,7 @@ func (ln *leafNode) tryGet(key []byte, db data.DBWriteCacher, marshalizer marsha return nil, nil } -func (ln *leafNode) getNext(key []byte, dbw data.DBWriteCacher, marshalizer marshal.Marshalizer) (node, []byte, error) { +func (ln *leafNode) getNext(key []byte, db data.DBWriteCacher) (node, []byte, error) { err := ln.isEmptyOrNil() if err != nil { return nil, nil, err @@ -176,46 +209,82 @@ func (ln *leafNode) getNext(key []byte, dbw data.DBWriteCacher, marshalizer mars return nil, nil, ErrNodeNotFound } -func (ln *leafNode) insert(n *leafNode, db data.DBWriteCacher, marshalizer marshal.Marshalizer) (bool, node, error) { +func (ln *leafNode) insert(n *leafNode, db data.DBWriteCacher) (bool, node, [][]byte, error) { err := ln.isEmptyOrNil() if err != nil { - return false, nil, err + return false, nil, [][]byte{}, err + } + + oldHash := make([][]byte, 0) + if !ln.dirty { + oldHash = append(oldHash, ln.hash) } + if bytes.Equal(n.Key, ln.Key) { ln.Value = n.Value ln.dirty = true ln.hash = nil - return true, ln, nil + return true, ln, oldHash, nil } keyMatchLen := prefixLen(n.Key, ln.Key) - branch := newBranchNode() + bn, err := newBranchNode(ln.marsh, ln.hasher) + if err != nil { + return false, nil, [][]byte{}, err + } + oldChildPos := ln.Key[keyMatchLen] newChildPos := n.Key[keyMatchLen] if childPosOutOfRange(oldChildPos) || childPosOutOfRange(newChildPos) { - return false, nil, ErrChildPosOutOfRange + return false, nil, [][]byte{}, ErrChildPosOutOfRange } - branch.children[oldChildPos] = newLeafNode(ln.Key[keyMatchLen+1:], ln.Value) - branch.children[newChildPos] = newLeafNode(n.Key[keyMatchLen+1:], n.Value) + newLnOldChildPos, err := newLeafNode(ln.Key[keyMatchLen+1:], ln.Value, ln.marsh, ln.hasher) + if err != nil { + return false, nil, [][]byte{}, err + } + bn.children[oldChildPos] = newLnOldChildPos + + newLnNewChildPos, err := newLeafNode(n.Key[keyMatchLen+1:], n.Value, ln.marsh, ln.hasher) + if err != nil { + return false, nil, [][]byte{}, err + } + bn.children[newChildPos] = newLnNewChildPos if keyMatchLen == 0 { - return true, branch, nil + return true, bn, oldHash, nil } - return true, newExtensionNode(ln.Key[:keyMatchLen], branch), nil + + newEn, err := newExtensionNode(ln.Key[:keyMatchLen], bn, ln.marsh, ln.hasher) + if err != nil { + return false, nil, [][]byte{}, err + } + + return true, newEn, oldHash, nil } -func (ln *leafNode) delete(key []byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer) (bool, node, error) { +func (ln *leafNode) delete(key []byte, db data.DBWriteCacher) (bool, node, [][]byte, error) { keyMatchLen := prefixLen(key, ln.Key) if keyMatchLen == len(key) { - return true, nil, nil + oldHash := make([][]byte, 0) + if !ln.dirty { + oldHash = append(oldHash, ln.hash) + } + + return 
true, nil, oldHash, nil } - return false, ln, nil + return false, ln, [][]byte{}, nil } -func (ln *leafNode) reduceNode(pos int) node { +func (ln *leafNode) reduceNode(pos int) (node, error) { k := append([]byte{byte(pos)}, ln.Key...) - return newLeafNode(k, ln.Value) + + newLn, err := newLeafNode(k, ln.Value, ln.marsh, ln.hasher) + if err != nil { + return nil, err + } + + return newLn, nil } func (ln *leafNode) isEmptyOrNil() error { @@ -251,7 +320,7 @@ func (ln *leafNode) deepClone() node { return nil } - clonedNode := &leafNode{} + clonedNode := &leafNode{baseNode: &baseNode{}} if ln.Key != nil { clonedNode.Key = make([]byte, len(ln.Key)) @@ -269,10 +338,45 @@ func (ln *leafNode) deepClone() node { } clonedNode.dirty = ln.dirty + clonedNode.marsh = ln.marsh + clonedNode.hasher = ln.hasher return clonedNode } +func (ln *leafNode) getDirtyHashes() ([][]byte, error) { + err := ln.isEmptyOrNil() + if err != nil { + return nil, err + } + + dirtyHashes := make([][]byte, 0) + + if !ln.isDirty() { + return dirtyHashes, nil + } + + dirtyHashes = append(dirtyHashes, ln.getHash()) + return dirtyHashes, nil +} + +func (ln *leafNode) getChildren(db data.DBWriteCacher) ([]node, error) { + return nil, nil +} + +func (ln *leafNode) isValid() bool { + return len(ln.Value) > 0 +} + +func (ln *leafNode) setDirty(dirty bool) { + ln.dirty = dirty +} + +func (ln *leafNode) loadChildren(syncer *trieSyncer) error { + syncer.interceptedNodes.Remove(ln.hash) + return nil +} + func (ln *leafNode) getAllLeaves(leaves map[string][]byte, key []byte, _ data.DBWriteCacher, _ marshal.Marshalizer) error { err := ln.isEmptyOrNil() if err != nil { diff --git a/data/trie/leafNode_test.go b/data/trie/leafNode_test.go index b1d738004f0..4fbf6379c63 100644 --- a/data/trie/leafNode_test.go +++ b/data/trie/leafNode_test.go @@ -6,268 +6,281 @@ import ( "fmt" "reflect" "testing" + "time" "github.com/ElrondNetwork/elrond-go/data/mock" protobuf "github.com/ElrondNetwork/elrond-go/data/trie/proto" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/storage/lrucache" "github.com/stretchr/testify/assert" ) -func getLn() *leafNode { - return newLeafNode([]byte("dog"), []byte("dog")) +func getLn(marsh marshal.Marshalizer, hasher hashing.Hasher) *leafNode { + newLn, _ := newLeafNode([]byte("dog"), []byte("dog"), marsh, hasher) + return newLn } func TestLeafNode_newLeafNode(t *testing.T) { t.Parallel() + + marsh, hasher := getTestMarshAndHasher() expectedLn := &leafNode{ CollapsedLn: protobuf.CollapsedLn{ Key: []byte("dog"), Value: []byte("dog"), }, - hash: nil, - dirty: true, + baseNode: &baseNode{ + dirty: true, + marsh: marsh, + hasher: hasher, + }, } - ln := newLeafNode([]byte("dog"), []byte("dog")) + ln, _ := newLeafNode([]byte("dog"), []byte("dog"), marsh, hasher) assert.Equal(t, expectedLn, ln) } func TestLeafNode_getHash(t *testing.T) { t.Parallel() - ln := &leafNode{hash: []byte("test hash")} + + ln := &leafNode{baseNode: &baseNode{hash: []byte("test hash")}} assert.Equal(t, ln.hash, ln.getHash()) } func TestLeafNode_isDirty(t *testing.T) { t.Parallel() - ln := &leafNode{dirty: true} + + ln := &leafNode{baseNode: &baseNode{dirty: true}} assert.Equal(t, true, ln.isDirty()) - ln = &leafNode{dirty: false} + ln = &leafNode{baseNode: &baseNode{dirty: false}} assert.Equal(t, false, ln.isDirty()) } func TestLeafNode_getCollapsed(t *testing.T) { t.Parallel() - ln := getLn() - marsh, hasher := getTestMarshAndHasher() - collapsed, err := ln.getCollapsed(marsh, 
hasher) + ln := getLn(getTestMarshAndHasher()) + + collapsed, err := ln.getCollapsed() assert.Nil(t, err) assert.Equal(t, ln, collapsed) } func TestLeafNode_setHash(t *testing.T) { t.Parallel() - ln := getLn() - marsh, hasher := getTestMarshAndHasher() - hash, _ := encodeNodeAndGetHash(ln, marsh, hasher) + ln := getLn(getTestMarshAndHasher()) + hash, _ := encodeNodeAndGetHash(ln) - err := ln.setHash(marsh, hasher) + err := ln.setHash() assert.Nil(t, err) assert.Equal(t, hash, ln.hash) } func TestLeafNode_setHashEmptyNode(t *testing.T) { t.Parallel() - marsh, hasher := getTestMarshAndHasher() - ln := &leafNode{} - err := ln.setHash(marsh, hasher) + ln := &leafNode{baseNode: &baseNode{}} + + err := ln.setHash() assert.Equal(t, ErrEmptyNode, err) assert.Nil(t, ln.hash) } func TestLeafNode_setHashNilNode(t *testing.T) { t.Parallel() - marsh, hasher := getTestMarshAndHasher() + var ln *leafNode - err := ln.setHash(marsh, hasher) + err := ln.setHash() assert.Equal(t, ErrNilNode, err) assert.Nil(t, ln) } +func TestLeafNode_setGivenHash(t *testing.T) { + t.Parallel() + + ln := &leafNode{baseNode: &baseNode{}} + expectedHash := []byte("node hash") + + ln.setGivenHash(expectedHash) + assert.Equal(t, expectedHash, ln.hash) +} + func TestLeafNode_hashChildren(t *testing.T) { t.Parallel() - ln := getLn() - marsh, hasher := getTestMarshAndHasher() - assert.Nil(t, ln.hashChildren(marsh, hasher)) + + ln := getLn(getTestMarshAndHasher()) + + assert.Nil(t, ln.hashChildren()) } func TestLeafNode_hashNode(t *testing.T) { t.Parallel() - ln := getLn() - marsh, hasher := getTestMarshAndHasher() - expectedHash, _ := encodeNodeAndGetHash(ln, marsh, hasher) - hash, err := ln.hashNode(marsh, hasher) + ln := getLn(getTestMarshAndHasher()) + expectedHash, _ := encodeNodeAndGetHash(ln) + + hash, err := ln.hashNode() assert.Nil(t, err) assert.Equal(t, expectedHash, hash) } func TestLeafNode_hashNodeEmptyNode(t *testing.T) { t.Parallel() + ln := &leafNode{} - marsh, hasher := getTestMarshAndHasher() - hash, err := ln.hashNode(marsh, hasher) + hash, err := ln.hashNode() assert.Equal(t, ErrEmptyNode, err) assert.Nil(t, hash) } func TestLeafNode_hashNodeNilNode(t *testing.T) { t.Parallel() + var ln *leafNode - marsh, hasher := getTestMarshAndHasher() - hash, err := ln.hashNode(marsh, hasher) + hash, err := ln.hashNode() assert.Equal(t, ErrNilNode, err) assert.Nil(t, hash) } func TestLeafNode_commit(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - ln := getLn() - marsh, hasher := getTestMarshAndHasher() - hash, _ := encodeNodeAndGetHash(ln, marsh, hasher) - _ = ln.setHash(marsh, hasher) + db := mock.NewMemDbMock() + ln := getLn(getTestMarshAndHasher()) + hash, _ := encodeNodeAndGetHash(ln) + _ = ln.setHash() - err := ln.commit(0, db, marsh, hasher) + err := ln.commit(false, 0, db, db) assert.Nil(t, err) encNode, _ := db.Get(hash) - node, _ := decodeNode(encNode, marsh) - ln = getLn() + node, _ := decodeNode(encNode, ln.marsh, ln.hasher) + ln = getLn(ln.marsh, ln.hasher) ln.dirty = false assert.Equal(t, ln, node) } func TestLeafNode_commitEmptyNode(t *testing.T) { t.Parallel() + ln := &leafNode{} - db, _ := mock.NewMemDbMock() - marsh, hasher := getTestMarshAndHasher() - err := ln.commit(0, db, marsh, hasher) + err := ln.commit(false, 0, nil, nil) assert.Equal(t, ErrEmptyNode, err) } func TestLeafNode_commitNilNode(t *testing.T) { t.Parallel() + var ln *leafNode - db, _ := mock.NewMemDbMock() - marsh, hasher := getTestMarshAndHasher() - err := ln.commit(0, db, marsh, hasher) + err := ln.commit(false, 0, nil, nil) 
assert.Equal(t, ErrNilNode, err) } func TestLeafNode_getEncodedNode(t *testing.T) { t.Parallel() - ln := getLn() - marsh, _ := getTestMarshAndHasher() - expectedEncodedNode, _ := marsh.Marshal(ln) + ln := getLn(getTestMarshAndHasher()) + expectedEncodedNode, _ := ln.marsh.Marshal(ln) expectedEncodedNode = append(expectedEncodedNode, leaf) - encNode, err := ln.getEncodedNode(marsh) + encNode, err := ln.getEncodedNode() assert.Nil(t, err) assert.Equal(t, expectedEncodedNode, encNode) } func TestLeafNode_getEncodedNodeEmpty(t *testing.T) { t.Parallel() + ln := &leafNode{} - marsh, _ := getTestMarshAndHasher() - encNode, err := ln.getEncodedNode(marsh) + encNode, err := ln.getEncodedNode() assert.Equal(t, ErrEmptyNode, err) assert.Nil(t, encNode) } func TestLeafNode_getEncodedNodeNil(t *testing.T) { t.Parallel() + var ln *leafNode - marsh, _ := getTestMarshAndHasher() - encNode, err := ln.getEncodedNode(marsh) + encNode, err := ln.getEncodedNode() assert.Equal(t, ErrNilNode, err) assert.Nil(t, encNode) } func TestLeafNode_resolveCollapsed(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - ln := getLn() - marsh, _ := getTestMarshAndHasher() - assert.Nil(t, ln.resolveCollapsed(0, db, marsh)) + ln := getLn(getTestMarshAndHasher()) + + assert.Nil(t, ln.resolveCollapsed(0, nil)) } func TestLeafNode_isCollapsed(t *testing.T) { t.Parallel() - ln := getLn() + + ln := getLn(getTestMarshAndHasher()) assert.False(t, ln.isCollapsed()) } func TestLeafNode_tryGet(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - ln := getLn() - marsh, _ := getTestMarshAndHasher() - key := []byte{100, 111, 103} - val, err := ln.tryGet(key, db, marsh) + ln := getLn(getTestMarshAndHasher()) + key := []byte("dog") + + val, err := ln.tryGet(key, nil) assert.Equal(t, []byte("dog"), val) assert.Nil(t, err) } func TestLeafNode_tryGetWrongKey(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - ln := getLn() - marsh, _ := getTestMarshAndHasher() - key := []byte{1, 2, 3} - val, err := ln.tryGet(key, db, marsh) + ln := getLn(getTestMarshAndHasher()) + wrongKey := []byte{1, 2, 3} + + val, err := ln.tryGet(wrongKey, nil) assert.Nil(t, val) assert.Nil(t, err) } func TestLeafNode_tryGetEmptyNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + ln := &leafNode{} - marsh, _ := getTestMarshAndHasher() - key := []byte{100, 111, 103} - val, err := ln.tryGet(key, db, marsh) + key := []byte("dog") + val, err := ln.tryGet(key, nil) assert.Equal(t, ErrEmptyNode, err) assert.Nil(t, val) } func TestLeafNode_tryGetNilNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + var ln *leafNode - marsh, _ := getTestMarshAndHasher() + key := []byte("dog") - key := []byte{100, 111, 103} - val, err := ln.tryGet(key, db, marsh) + val, err := ln.tryGet(key, nil) assert.Equal(t, ErrNilNode, err) assert.Nil(t, val) } func TestLeafNode_getNext(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - ln := getLn() - marsh, _ := getTestMarshAndHasher() - key := []byte{100, 111, 103} - node, key, err := ln.getNext(key, db, marsh) + ln := getLn(getTestMarshAndHasher()) + key := []byte("dog") + + node, key, err := ln.getNext(key, nil) assert.Nil(t, node) assert.Nil(t, key) assert.Nil(t, err) @@ -275,12 +288,11 @@ func TestLeafNode_getNext(t *testing.T) { func TestLeafNode_getNextWrongKey(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - ln := getLn() - marsh, _ := getTestMarshAndHasher() - key := []byte{2, 100, 111, 103} - node, key, err := ln.getNext(key, db, marsh) + ln := 
getLn(getTestMarshAndHasher()) + wrongKey := append([]byte{2}, []byte("dog")...) + + node, key, err := ln.getNext(wrongKey, nil) assert.Nil(t, node) assert.Nil(t, key) assert.Equal(t, ErrNodeNotFound, err) @@ -288,12 +300,11 @@ func TestLeafNode_getNextWrongKey(t *testing.T) { func TestLeafNode_getNextNilNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + var ln *leafNode - marsh, _ := getTestMarshAndHasher() - key := []byte{2, 100, 111, 103} + key := []byte("dog") - node, key, err := ln.getNext(key, db, marsh) + node, key, err := ln.getNext(key, nil) assert.Nil(t, node) assert.Nil(t, key) assert.Equal(t, ErrNilNode, err) @@ -301,42 +312,103 @@ func TestLeafNode_getNextNilNode(t *testing.T) { func TestLeafNode_insertAtSameKey(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - ln := getLn() - node := newLeafNode([]byte{100, 111, 103}, []byte("dogs")) - marsh, _ := getTestMarshAndHasher() + ln := getLn(getTestMarshAndHasher()) + key := []byte("dog") + expectedVal := []byte("dogs") + node, _ := newLeafNode(key, expectedVal, ln.marsh, ln.hasher) - dirty, newNode, err := ln.insert(node, db, marsh) + dirty, newNode, _, err := ln.insert(node, nil) assert.True(t, dirty) assert.Nil(t, err) - val, _ := newNode.tryGet([]byte{100, 111, 103}, db, marsh) - assert.Equal(t, []byte("dogs"), val) + + val, _ := newNode.tryGet(key, nil) + assert.Equal(t, expectedVal, val) } func TestLeafNode_insertAtDifferentKey(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - ln := newLeafNode([]byte{2, 100, 111, 103}, []byte{100, 111, 103}) - node := newLeafNode([]byte{3, 4, 5}, []byte{3, 4, 5}) - marsh, _ := getTestMarshAndHasher() - dirty, newNode, err := ln.insert(node, db, marsh) + marsh, hasher := getTestMarshAndHasher() + + lnKey := []byte{2, 100, 111, 103} + ln, _ := newLeafNode(lnKey, []byte("dog"), marsh, hasher) + + nodeKey := []byte{3, 4, 5} + nodeVal := []byte{3, 4, 5} + node, _ := newLeafNode(nodeKey, nodeVal, marsh, hasher) + + dirty, newNode, _, err := ln.insert(node, nil) assert.True(t, dirty) assert.Nil(t, err) - val, _ := newNode.tryGet([]byte{3, 4, 5}, db, marsh) - assert.Equal(t, []byte{3, 4, 5}, val) + + val, _ := newNode.tryGet(nodeKey, nil) + assert.Equal(t, nodeVal, val) assert.IsType(t, &branchNode{}, newNode) } +func TestLeafNode_insertInStoredLnAtSameKey(t *testing.T) { + t.Parallel() + + db := mock.NewMemDbMock() + ln := getLn(getTestMarshAndHasher()) + node, _ := newLeafNode([]byte("dog"), []byte("dogs"), ln.marsh, ln.hasher) + _ = ln.commit(false, 0, db, db) + lnHash := ln.getHash() + + dirty, _, oldHashes, err := ln.insert(node, db) + assert.True(t, dirty) + assert.Nil(t, err) + assert.Equal(t, [][]byte{lnHash}, oldHashes) +} + +func TestLeafNode_insertInStoredLnAtDifferentKey(t *testing.T) { + t.Parallel() + + db := mock.NewMemDbMock() + marsh, hasher := getTestMarshAndHasher() + ln, _ := newLeafNode([]byte{1, 2, 3}, []byte("dog"), marsh, hasher) + node, _ := newLeafNode([]byte{4, 5, 6}, []byte("dogs"), marsh, hasher) + _ = ln.commit(false, 0, db, db) + lnHash := ln.getHash() + + dirty, _, oldHashes, err := ln.insert(node, db) + assert.True(t, dirty) + assert.Nil(t, err) + assert.Equal(t, [][]byte{lnHash}, oldHashes) +} + +func TestLeafNode_insertInDirtyLnAtSameKey(t *testing.T) { + t.Parallel() + + ln := getLn(getTestMarshAndHasher()) + node, _ := newLeafNode([]byte("dog"), []byte("dogs"), ln.marsh, ln.hasher) + + dirty, _, oldHashes, err := ln.insert(node, nil) + assert.True(t, dirty) + assert.Nil(t, err) + assert.Equal(t, [][]byte{}, oldHashes) +} + 
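The tests above and below pin down the new insert contract: besides the dirty flag and the resulting node, insert now also returns the hashes of nodes that were already committed (non-dirty), so the trie can later mark them for eviction. A minimal caller sketch follows; it is not part of the patch and assumes the data/trie package test context with the helpers used in this file (getTestMarshAndHasher, mock.NewMemDbMock, newLeafNode):

func exampleOldHashesFromInsert() ([][]byte, error) {
    marsh, hasher := getTestMarshAndHasher()
    db := mock.NewMemDbMock()

    // A committed leaf is no longer dirty, so a later insert reports its hash
    // as "old" (eligible for eviction once the change becomes final).
    ln, err := newLeafNode([]byte("dog"), []byte("dog"), marsh, hasher)
    if err != nil {
        return nil, err
    }
    if err = ln.commit(false, 0, db, db); err != nil {
        return nil, err
    }

    newLn, err := newLeafNode([]byte("dog"), []byte("dogs"), marsh, hasher)
    if err != nil {
        return nil, err
    }

    // oldHashes holds ln's previous hash; for a dirty (uncommitted) leaf it would be empty.
    _, _, oldHashes, err := ln.insert(newLn, db)
    return oldHashes, err
}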
+func TestLeafNode_insertInDirtyLnAtDifferentKey(t *testing.T) { + t.Parallel() + + marsh, hasher := getTestMarshAndHasher() + ln, _ := newLeafNode([]byte{1, 2, 3}, []byte("dog"), marsh, hasher) + node, _ := newLeafNode([]byte{4, 5, 6}, []byte("dogs"), marsh, hasher) + + dirty, _, oldHashes, err := ln.insert(node, nil) + assert.True(t, dirty) + assert.Nil(t, err) + assert.Equal(t, [][]byte{}, oldHashes) +} + func TestLeafNode_insertInNilNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() + var ln *leafNode - node := newLeafNode([]byte{0, 2, 3}, []byte("dogs")) - marsh, _ := getTestMarshAndHasher() - dirty, newNode, err := ln.insert(node, db, marsh) + dirty, newNode, _, err := ln.insert(&leafNode{}, nil) assert.False(t, dirty) assert.Equal(t, ErrNilNode, err) assert.Nil(t, newNode) @@ -344,23 +416,61 @@ func TestLeafNode_insertInNilNode(t *testing.T) { func TestLeafNode_deletePresent(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - ln := getLn() - marsh, _ := getTestMarshAndHasher() - dirty, newNode, err := ln.delete([]byte{100, 111, 103}, db, marsh) + ln := getLn(getTestMarshAndHasher()) + + dirty, newNode, _, err := ln.delete([]byte("dog"), nil) assert.True(t, dirty) assert.Nil(t, err) assert.Nil(t, newNode) } +func TestLeafNode_deleteFromStoredLnAtSameKey(t *testing.T) { + t.Parallel() + + db := mock.NewMemDbMock() + ln := getLn(getTestMarshAndHasher()) + _ = ln.commit(false, 0, db, db) + lnHash := ln.getHash() + + dirty, _, oldHashes, err := ln.delete([]byte("dog"), db) + assert.True(t, dirty) + assert.Nil(t, err) + assert.Equal(t, [][]byte{lnHash}, oldHashes) +} + +func TestLeafNode_deleteFromLnAtDifferentKey(t *testing.T) { + t.Parallel() + + db := mock.NewMemDbMock() + ln := getLn(getTestMarshAndHasher()) + _ = ln.commit(false, 0, db, db) + wrongKey := []byte{1, 2, 3} + + dirty, _, oldHashes, err := ln.delete(wrongKey, db) + assert.False(t, dirty) + assert.Nil(t, err) + assert.Equal(t, [][]byte{}, oldHashes) +} + +func TestLeafNode_deleteFromDirtyLnAtSameKey(t *testing.T) { + t.Parallel() + + ln := getLn(getTestMarshAndHasher()) + + dirty, _, oldHashes, err := ln.delete([]byte("dog"), nil) + assert.True(t, dirty) + assert.Nil(t, err) + assert.Equal(t, [][]byte{}, oldHashes) +} + func TestLeafNode_deleteNotPresent(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - ln := getLn() - marsh, _ := getTestMarshAndHasher() - dirty, newNode, err := ln.delete([]byte{1, 2, 3}, db, marsh) + ln := getLn(getTestMarshAndHasher()) + wrongKey := []byte{1, 2, 3} + + dirty, newNode, _, err := ln.delete(wrongKey, nil) assert.False(t, dirty) assert.Nil(t, err) assert.Equal(t, ln, newNode) @@ -368,14 +478,20 @@ func TestLeafNode_deleteNotPresent(t *testing.T) { func TestLeafNode_reduceNode(t *testing.T) { t.Parallel() - ln := &leafNode{CollapsedLn: protobuf.CollapsedLn{Key: []byte{100, 111, 103}}} - expected := &leafNode{CollapsedLn: protobuf.CollapsedLn{Key: []byte{2, 100, 111, 103}}, dirty: true} - node := ln.reduceNode(2) + + marsh, hasher := getTestMarshAndHasher() + ln, _ := newLeafNode([]byte{100, 111, 103}, nil, marsh, hasher) + expected, _ := newLeafNode([]byte{2, 100, 111, 103}, nil, marsh, hasher) + expected.dirty = true + + node, err := ln.reduceNode(2) assert.Equal(t, expected, node) + assert.Nil(t, err) } func TestLeafNode_isEmptyOrNil(t *testing.T) { t.Parallel() + ln := &leafNode{} assert.Equal(t, ErrEmptyNode, ln.isEmptyOrNil()) @@ -383,12 +499,66 @@ func TestLeafNode_isEmptyOrNil(t *testing.T) { assert.Equal(t, ErrNilNode, ln.isEmptyOrNil()) } +func 
TestLeafNode_getChildren(t *testing.T) { + t.Parallel() + + ln := getLn(getTestMarshAndHasher()) + + children, err := ln.getChildren(nil) + assert.Nil(t, err) + assert.Equal(t, 0, len(children)) +} + +func TestLeafNode_isValid(t *testing.T) { + t.Parallel() + + ln := getLn(getTestMarshAndHasher()) + assert.True(t, ln.isValid()) + + ln.Value = []byte{} + assert.False(t, ln.isValid()) +} + +func TestLeafNode_setDirty(t *testing.T) { + t.Parallel() + + ln := &leafNode{baseNode: &baseNode{}} + ln.setDirty(true) + + assert.True(t, ln.dirty) +} + +func TestLeafNode_loadChildren(t *testing.T) { + t.Parallel() + + marsh, hasher := getTestMarshAndHasher() + tr := initTrie() + nodes, hashes := getEncodedTrieNodesAndHashes(tr) + nodesCacher, _ := lrucache.NewCache(100) + + resolver := &mock.TrieNodesResolverStub{} + for i := range nodes { + node, _ := NewInterceptedTrieNode(nodes[i], marsh, hasher) + nodesCacher.Put(node.hash, node) + } + syncer, _ := NewTrieSyncer(resolver, nodesCacher, tr, time.Second) + syncer.interceptedNodes.RegisterHandler(func(key []byte) { + syncer.chRcvTrieNodes <- true + }) + + lnPosition := 5 + ln := &leafNode{baseNode: &baseNode{hash: hashes[lnPosition]}} + err := ln.loadChildren(syncer) + assert.Nil(t, err) + assert.Equal(t, 5, nodesCacher.Len()) +} + //------- deepClone func TestLeafNode_deepCloneWithNilHashShouldWork(t *testing.T) { t.Parallel() - ln := &leafNode{} + ln := &leafNode{baseNode: &baseNode{}} ln.dirty = true ln.hash = nil ln.Value = getRandomByteSlice() @@ -402,7 +572,7 @@ func TestLeafNode_deepCloneWithNilHashShouldWork(t *testing.T) { func TestLeafNode_deepCloneWithNilValueShouldWork(t *testing.T) { t.Parallel() - ln := &leafNode{} + ln := &leafNode{baseNode: &baseNode{}} ln.dirty = true ln.hash = getRandomByteSlice() ln.Value = nil @@ -416,7 +586,7 @@ func TestLeafNode_deepCloneWithNilValueShouldWork(t *testing.T) { func TestLeafNode_deepCloneWithNilKeyShouldWork(t *testing.T) { t.Parallel() - ln := &leafNode{} + ln := &leafNode{baseNode: &baseNode{}} ln.dirty = true ln.hash = getRandomByteSlice() ln.Value = getRandomByteSlice() @@ -430,7 +600,7 @@ func TestLeafNode_deepCloneWithNilKeyShouldWork(t *testing.T) { func TestLeafNode_deepCloneShouldWork(t *testing.T) { t.Parallel() - ln := &leafNode{} + ln := &leafNode{baseNode: &baseNode{}} ln.dirty = true ln.hash = getRandomByteSlice() ln.Value = getRandomByteSlice() @@ -463,10 +633,10 @@ func getRandomByteSlice() []byte { func getLeafNodeContents(lf *leafNode) string { str := fmt.Sprintf(`leaf node: - key: %s - value: %s - hash: %s - dirty: %v + key: %s + value: %s + hash: %s + dirty: %v `, hex.EncodeToString(lf.Key), hex.EncodeToString(lf.Value), diff --git a/data/trie/node.go b/data/trie/node.go index 44e13ba6693..7266abefcbc 100644 --- a/data/trie/node.go +++ b/data/trie/node.go @@ -1,9 +1,6 @@ package trie import ( - "io" - "sync" - "github.com/ElrondNetwork/elrond-go/data" protobuf "github.com/ElrondNetwork/elrond-go/data/trie/proto" "github.com/ElrondNetwork/elrond-go/hashing" @@ -15,58 +12,37 @@ const firstByte = 0 const maxTrieLevelAfterCommit = 6 const hexTerminator = 16 -type node interface { - getHash() []byte - setHash(marshalizer marshal.Marshalizer, hasher hashing.Hasher) error - setHashConcurrent(marshalizer marshal.Marshalizer, hasher hashing.Hasher, wg *sync.WaitGroup, c chan error) - setRootHash(marshalizer marshal.Marshalizer, hasher hashing.Hasher) error - getCollapsed(marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) // a collapsed node is a node that instead of 
the children holds the children hashes - isCollapsed() bool - isPosCollapsed(pos int) bool - isDirty() bool - getEncodedNode(marshal.Marshalizer) ([]byte, error) - commit(level byte, dbw data.DBWriteCacher, marshalizer marshal.Marshalizer, hasher hashing.Hasher) error - resolveCollapsed(pos byte, dbw data.DBWriteCacher, marshalizer marshal.Marshalizer) error - hashNode(marshalizer marshal.Marshalizer, hasher hashing.Hasher) ([]byte, error) - hashChildren(marshalizer marshal.Marshalizer, hasher hashing.Hasher) error - tryGet(key []byte, dbw data.DBWriteCacher, marshalizer marshal.Marshalizer) ([]byte, error) - getNext(key []byte, dbw data.DBWriteCacher, marshalizer marshal.Marshalizer) (node, []byte, error) - insert(n *leafNode, dbw data.DBWriteCacher, marshalizer marshal.Marshalizer) (bool, node, error) - delete(key []byte, dbw data.DBWriteCacher, marshalizer marshal.Marshalizer) (bool, node, error) - reduceNode(pos int) node - isEmptyOrNil() error - print(writer io.Writer, index int) - deepClone() node - getAllLeaves(map[string][]byte, []byte, data.DBWriteCacher, marshal.Marshalizer) error +type baseNode struct { + hash []byte + dirty bool + marsh marshal.Marshalizer + hasher hashing.Hasher } type branchNode struct { protobuf.CollapsedBn children [nrOfChildren]node - hash []byte - dirty bool + *baseNode } type extensionNode struct { protobuf.CollapsedEn child node - hash []byte - dirty bool + *baseNode } type leafNode struct { protobuf.CollapsedLn - hash []byte - dirty bool + *baseNode } -func hashChildrenAndNode(n node, marshalizer marshal.Marshalizer, hasher hashing.Hasher) ([]byte, error) { - err := n.hashChildren(marshalizer, hasher) +func hashChildrenAndNode(n node) ([]byte, error) { + err := n.hashChildren() if err != nil { return nil, err } - hashed, err := n.hashNode(marshalizer, hasher) + hashed, err := n.hashNode() if err != nil { return nil, err } @@ -74,33 +50,33 @@ func hashChildrenAndNode(n node, marshalizer marshal.Marshalizer, hasher hashing return hashed, nil } -func encodeNodeAndGetHash(n node, marshalizer marshal.Marshalizer, hasher hashing.Hasher) ([]byte, error) { - encNode, err := n.getEncodedNode(marshalizer) +func encodeNodeAndGetHash(n node) ([]byte, error) { + encNode, err := n.getEncodedNode() if err != nil { return nil, err } - hash := hasher.Compute(string(encNode)) + hash := n.getHasher().Compute(string(encNode)) return hash, nil } -func encodeNodeAndCommitToDB(n node, db data.DBWriteCacher, marshalizer marshal.Marshalizer, hasher hashing.Hasher) error { +func encodeNodeAndCommitToDB(n node, db data.DBWriteCacher) error { key := n.getHash() if key == nil { - err := n.setHash(marshalizer, hasher) + err := n.setHash() if err != nil { return err } key = n.getHash() } - n, err := n.getCollapsed(marshalizer, hasher) + n, err := n.getCollapsed() if err != nil { return err } - val, err := n.getEncodedNode(marshalizer) + val, err := n.getEncodedNode() if err != nil { return err } @@ -110,28 +86,28 @@ func encodeNodeAndCommitToDB(n node, db data.DBWriteCacher, marshalizer marshal. 
return err } -func getNodeFromDBAndDecode(n []byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer) (node, error) { +func getNodeFromDBAndDecode(n []byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) { encChild, err := db.Get(n) if err != nil { return nil, err } - node, err := decodeNode(encChild, marshalizer) + decodedNode, err := decodeNode(encChild, marshalizer, hasher) if err != nil { return nil, err } - return node, nil + return decodedNode, nil } -func resolveIfCollapsed(n node, pos byte, db data.DBWriteCacher, marshalizer marshal.Marshalizer) error { +func resolveIfCollapsed(n node, pos byte, db data.DBWriteCacher) error { err := n.isEmptyOrNil() if err != nil { return err } if n.isPosCollapsed(int(pos)) { - err := n.resolveCollapsed(pos, db, marshalizer) + err = n.resolveCollapsed(pos, db) if err != nil { return err } @@ -163,7 +139,7 @@ func hasValidHash(n node) (bool, error) { return true, nil } -func decodeNode(encNode []byte, marshalizer marshal.Marshalizer) (node, error) { +func decodeNode(encNode []byte, marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) { if encNode == nil || len(encNode) < 1 { return nil, ErrInvalidEncoding } @@ -171,32 +147,33 @@ func decodeNode(encNode []byte, marshalizer marshal.Marshalizer) (node, error) { nodeType := encNode[len(encNode)-1] encNode = encNode[:len(encNode)-1] - node, err := getEmptyNodeOfType(nodeType) + newNode, err := getEmptyNodeOfType(nodeType) if err != nil { return nil, err } - err = marshalizer.Unmarshal(node, encNode) + err = marshalizer.Unmarshal(newNode, encNode) if err != nil { return nil, err } - return node, nil + newNode.setMarshalizer(marshalizer) + newNode.setHasher(hasher) + + return newNode, nil } func getEmptyNodeOfType(t byte) (node, error) { - var decNode node switch t { case extension: - decNode = &extensionNode{} + return &extensionNode{baseNode: &baseNode{}}, nil case leaf: - decNode = &leafNode{} + return &leafNode{baseNode: &baseNode{}}, nil case branch: - decNode = newBranchNode() + return &branchNode{baseNode: &baseNode{}}, nil default: return nil, ErrInvalidNode } - return decNode, nil } func childPosOutOfRange(pos byte) bool { diff --git a/data/trie/node_test.go b/data/trie/node_test.go index b509770c171..5013d372f1c 100644 --- a/data/trie/node_test.go +++ b/data/trie/node_test.go @@ -2,103 +2,119 @@ package trie import ( "testing" + "time" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/mock" - protobuf "github.com/ElrondNetwork/elrond-go/data/trie/proto" + "github.com/ElrondNetwork/elrond-go/data/trie/proto" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/stretchr/testify/assert" ) +var snapshotDelay = time.Second +var batchDelay = 2 * time.Second + func TestNode_hashChildrenAndNodeBranchNode(t *testing.T) { t.Parallel() - marsh, hasher := getTestMarshAndHasher() - bn, collapsedBn := getBnAndCollapsedBn() - expectedNodeHash, _ := encodeNodeAndGetHash(collapsedBn, marsh, hasher) - hash, err := hashChildrenAndNode(bn, marsh, hasher) + bn, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) + expectedNodeHash, _ := encodeNodeAndGetHash(collapsedBn) + + hash, err := hashChildrenAndNode(bn) assert.Nil(t, err) assert.Equal(t, expectedNodeHash, hash) } func TestNode_hashChildrenAndNodeExtensionNode(t *testing.T) { t.Parallel() - marsh, hasher := getTestMarshAndHasher() + en, collapsedEn := getEnAndCollapsedEn() - expectedNodeHash, _ := encodeNodeAndGetHash(collapsedEn, marsh, hasher) + 
expectedNodeHash, _ := encodeNodeAndGetHash(collapsedEn) - hash, err := hashChildrenAndNode(en, marsh, hasher) + hash, err := hashChildrenAndNode(en) assert.Nil(t, err) assert.Equal(t, expectedNodeHash, hash) } func TestNode_hashChildrenAndNodeLeafNode(t *testing.T) { t.Parallel() - marsh, hasher := getTestMarshAndHasher() - ln := getLn() - expectedNodeHash, _ := encodeNodeAndGetHash(ln, marsh, hasher) - hash, err := hashChildrenAndNode(ln, marsh, hasher) + ln := getLn(getTestMarshAndHasher()) + expectedNodeHash, _ := encodeNodeAndGetHash(ln) + + hash, err := hashChildrenAndNode(ln) assert.Nil(t, err) assert.Equal(t, expectedNodeHash, hash) } func TestNode_encodeNodeAndGetHashBranchNode(t *testing.T) { t.Parallel() - marsh, hasher := getTestMarshAndHasher() + bn, _ := newBranchNode(getTestMarshAndHasher()) encChildren := make([][]byte, nrOfChildren) encChildren[1] = []byte("dog") encChildren[10] = []byte("doge") - bn := newBranchNode() bn.EncodedChildren = encChildren - encNode, _ := marsh.Marshal(bn) + encNode, _ := bn.marsh.Marshal(bn) encNode = append(encNode, branch) - expextedHash := hasher.Compute(string(encNode)) + expextedHash := bn.hasher.Compute(string(encNode)) - hash, err := encodeNodeAndGetHash(bn, marsh, hasher) + hash, err := encodeNodeAndGetHash(bn) assert.Nil(t, err) assert.Equal(t, expextedHash, hash) } func TestNode_encodeNodeAndGetHashExtensionNode(t *testing.T) { t.Parallel() + marsh, hasher := getTestMarshAndHasher() - en := &extensionNode{CollapsedEn: protobuf.CollapsedEn{Key: []byte{2}, EncodedChild: []byte("doge")}} + en := &extensionNode{ + CollapsedEn: protobuf.CollapsedEn{ + Key: []byte{2}, + EncodedChild: []byte("doge"), + }, + baseNode: &baseNode{ + + marsh: marsh, + hasher: hasher, + }, + } encNode, _ := marsh.Marshal(en) encNode = append(encNode, extension) expextedHash := hasher.Compute(string(encNode)) - hash, err := encodeNodeAndGetHash(en, marsh, hasher) + hash, err := encodeNodeAndGetHash(en) assert.Nil(t, err) assert.Equal(t, expextedHash, hash) } func TestNode_encodeNodeAndGetHashLeafNode(t *testing.T) { t.Parallel() + marsh, hasher := getTestMarshAndHasher() - ln := newLeafNode([]byte{100, 111, 103}, []byte("dog")) + ln, _ := newLeafNode([]byte("dog"), []byte("dog"), marsh, hasher) encNode, _ := marsh.Marshal(ln) encNode = append(encNode, leaf) expextedHash := hasher.Compute(string(encNode)) - hash, err := encodeNodeAndGetHash(ln, marsh, hasher) + hash, err := encodeNodeAndGetHash(ln) assert.Nil(t, err) assert.Equal(t, expextedHash, hash) } func TestNode_encodeNodeAndCommitToDBBranchNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - marsh, hasher := getTestMarshAndHasher() - _, collapsedBn := getBnAndCollapsedBn() - encNode, _ := marsh.Marshal(collapsedBn) + + db := mock.NewMemDbMock() + _, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) + encNode, _ := collapsedBn.marsh.Marshal(collapsedBn) encNode = append(encNode, branch) - nodeHash := hasher.Compute(string(encNode)) + nodeHash := collapsedBn.hasher.Compute(string(encNode)) - err := encodeNodeAndCommitToDB(collapsedBn, db, marsh, hasher) + err := encodeNodeAndCommitToDB(collapsedBn, db) assert.Nil(t, err) val, _ := db.Get(nodeHash) @@ -107,14 +123,14 @@ func TestNode_encodeNodeAndCommitToDBBranchNode(t *testing.T) { func TestNode_encodeNodeAndCommitToDBExtensionNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - marsh, hasher := getTestMarshAndHasher() + + db := mock.NewMemDbMock() _, collapsedEn := getEnAndCollapsedEn() - encNode, _ := 
marsh.Marshal(collapsedEn) + encNode, _ := collapsedEn.marsh.Marshal(collapsedEn) encNode = append(encNode, extension) - nodeHash := hasher.Compute(string(encNode)) + nodeHash := collapsedEn.hasher.Compute(string(encNode)) - err := encodeNodeAndCommitToDB(collapsedEn, db, marsh, hasher) + err := encodeNodeAndCommitToDB(collapsedEn, db) assert.Nil(t, err) val, _ := db.Get(nodeHash) @@ -123,14 +139,14 @@ func TestNode_encodeNodeAndCommitToDBExtensionNode(t *testing.T) { func TestNode_encodeNodeAndCommitToDBLeafNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - marsh, hasher := getTestMarshAndHasher() - ln := getLn() - encNode, _ := marsh.Marshal(ln) + + db := mock.NewMemDbMock() + ln := getLn(getTestMarshAndHasher()) + encNode, _ := ln.marsh.Marshal(ln) encNode = append(encNode, leaf) - nodeHash := hasher.Compute(string(encNode)) + nodeHash := ln.hasher.Compute(string(encNode)) - err := encodeNodeAndCommitToDB(ln, db, marsh, hasher) + err := encodeNodeAndCommitToDB(ln, db) assert.Nil(t, err) val, _ := db.Get(nodeHash) @@ -139,111 +155,110 @@ func TestNode_encodeNodeAndCommitToDBLeafNode(t *testing.T) { func TestNode_getNodeFromDBAndDecodeBranchNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - marsh, hasher := getTestMarshAndHasher() - bn, collapsedBn := getBnAndCollapsedBn() - bn.commit(0, db, marsh, hasher) - encNode, _ := marsh.Marshal(collapsedBn) + db := mock.NewMemDbMock() + bn, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) + _ = bn.commit(false, 0, db, db) + + encNode, _ := bn.marsh.Marshal(collapsedBn) encNode = append(encNode, branch) - nodeHash := hasher.Compute(string(encNode)) + nodeHash := bn.hasher.Compute(string(encNode)) - node, err := getNodeFromDBAndDecode(nodeHash, db, marsh) + node, err := getNodeFromDBAndDecode(nodeHash, db, bn.marsh, bn.hasher) assert.Nil(t, err) - h1, _ := encodeNodeAndGetHash(collapsedBn, marsh, hasher) - h2, _ := encodeNodeAndGetHash(node, marsh, hasher) + h1, _ := encodeNodeAndGetHash(collapsedBn) + h2, _ := encodeNodeAndGetHash(node) assert.Equal(t, h1, h2) } func TestNode_getNodeFromDBAndDecodeExtensionNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - marsh, hasher := getTestMarshAndHasher() + + db := mock.NewMemDbMock() en, collapsedEn := getEnAndCollapsedEn() - en.commit(0, db, marsh, hasher) + _ = en.commit(false, 0, db, db) - encNode, _ := marsh.Marshal(collapsedEn) + encNode, _ := en.marsh.Marshal(collapsedEn) encNode = append(encNode, extension) - nodeHash := hasher.Compute(string(encNode)) + nodeHash := en.hasher.Compute(string(encNode)) - node, err := getNodeFromDBAndDecode(nodeHash, db, marsh) + node, err := getNodeFromDBAndDecode(nodeHash, db, en.marsh, en.hasher) assert.Nil(t, err) - h1, _ := encodeNodeAndGetHash(collapsedEn, marsh, hasher) - h2, _ := encodeNodeAndGetHash(node, marsh, hasher) + h1, _ := encodeNodeAndGetHash(collapsedEn) + h2, _ := encodeNodeAndGetHash(node) assert.Equal(t, h1, h2) } func TestNode_getNodeFromDBAndDecodeLeafNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - marsh, hasher := getTestMarshAndHasher() - ln := getLn() - ln.commit(0, db, marsh, hasher) - encNode, _ := marsh.Marshal(ln) + db := mock.NewMemDbMock() + ln := getLn(getTestMarshAndHasher()) + _ = ln.commit(false, 0, db, db) + + encNode, _ := ln.marsh.Marshal(ln) encNode = append(encNode, leaf) - nodeHash := hasher.Compute(string(encNode)) + nodeHash := ln.hasher.Compute(string(encNode)) - node, err := getNodeFromDBAndDecode(nodeHash, db, marsh) + node, err := 
getNodeFromDBAndDecode(nodeHash, db, ln.marsh, ln.hasher) assert.Nil(t, err) - ln = getLn() + + ln = getLn(ln.marsh, ln.hasher) ln.dirty = false assert.Equal(t, ln, node) } func TestNode_resolveIfCollapsedBranchNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - marsh, hasher := getTestMarshAndHasher() - bn, collapsedBn := getBnAndCollapsedBn() - bn.commit(0, db, marsh, hasher) + db := mock.NewMemDbMock() + bn, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) + childPos := byte(2) + _ = bn.commit(false, 0, db, db) - err := resolveIfCollapsed(collapsedBn, 2, db, marsh) + err := resolveIfCollapsed(collapsedBn, childPos, db) assert.Nil(t, err) assert.False(t, collapsedBn.isCollapsed()) } func TestNode_resolveIfCollapsedExtensionNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - marsh, hasher := getTestMarshAndHasher() - en, collapsedEn := getEnAndCollapsedEn() - en.commit(0, db, marsh, hasher) + db := mock.NewMemDbMock() + en, collapsedEn := getEnAndCollapsedEn() + _ = en.commit(false, 0, db, db) - err := resolveIfCollapsed(collapsedEn, 0, db, marsh) + err := resolveIfCollapsed(collapsedEn, 0, db) assert.Nil(t, err) assert.False(t, collapsedEn.isCollapsed()) } func TestNode_resolveIfCollapsedLeafNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - marsh, hasher := getTestMarshAndHasher() - ln := getLn() - ln.commit(0, db, marsh, hasher) + db := mock.NewMemDbMock() + ln := getLn(getTestMarshAndHasher()) + _ = ln.commit(false, 0, db, db) - err := resolveIfCollapsed(ln, 0, db, marsh) + err := resolveIfCollapsed(ln, 0, db) assert.Nil(t, err) assert.False(t, ln.isCollapsed()) } func TestNode_resolveIfCollapsedNilNode(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - marsh, _ := getTestMarshAndHasher() + var node *extensionNode - err := resolveIfCollapsed(node, 0, db, marsh) + err := resolveIfCollapsed(node, 0, nil) assert.Equal(t, ErrNilNode, err) } func TestNode_concat(t *testing.T) { t.Parallel() + a := []byte{1, 2, 3} var b byte b = 4 @@ -253,13 +268,13 @@ func TestNode_concat(t *testing.T) { func TestNode_hasValidHash(t *testing.T) { t.Parallel() - marsh, hasher := getTestMarshAndHasher() - bn, _ := getBnAndCollapsedBn() + + bn, _ := getBnAndCollapsedBn(getTestMarshAndHasher()) ok, err := hasValidHash(bn) assert.Nil(t, err) assert.False(t, ok) - bn.setHash(marsh, hasher) + _ = bn.setHash() bn.dirty = false ok, err = hasValidHash(bn) @@ -269,6 +284,7 @@ func TestNode_hasValidHash(t *testing.T) { func TestNode_hasValidHashNilNode(t *testing.T) { t.Parallel() + var node *branchNode ok, err := hasValidHash(node) assert.Equal(t, ErrNilNode, err) @@ -277,79 +293,78 @@ func TestNode_hasValidHashNilNode(t *testing.T) { func TestNode_decodeNodeBranchNode(t *testing.T) { t.Parallel() - marsh, hasher := getTestMarshAndHasher() - _, collapsedBn := getBnAndCollapsedBn() - encNode, _ := marsh.Marshal(collapsedBn) + _, collapsedBn := getBnAndCollapsedBn(getTestMarshAndHasher()) + encNode, _ := collapsedBn.marsh.Marshal(collapsedBn) encNode = append(encNode, branch) - node, err := decodeNode(encNode, marsh) + node, err := decodeNode(encNode, collapsedBn.marsh, collapsedBn.hasher) assert.Nil(t, err) - h1, _ := encodeNodeAndGetHash(collapsedBn, marsh, hasher) - h2, _ := encodeNodeAndGetHash(node, marsh, hasher) + h1, _ := encodeNodeAndGetHash(collapsedBn) + h2, _ := encodeNodeAndGetHash(node) assert.Equal(t, h1, h2) } func TestNode_decodeNodeExtensionNode(t *testing.T) { t.Parallel() - marsh, hasher := getTestMarshAndHasher() - _, collapsedEn 
:= getEnAndCollapsedEn() - encNode, _ := marsh.Marshal(collapsedEn) + _, collapsedEn := getEnAndCollapsedEn() + encNode, _ := collapsedEn.marsh.Marshal(collapsedEn) encNode = append(encNode, extension) - node, err := decodeNode(encNode, marsh) + node, err := decodeNode(encNode, collapsedEn.marsh, collapsedEn.hasher) assert.Nil(t, err) - h1, _ := encodeNodeAndGetHash(collapsedEn, marsh, hasher) - h2, _ := encodeNodeAndGetHash(node, marsh, hasher) + h1, _ := encodeNodeAndGetHash(collapsedEn) + h2, _ := encodeNodeAndGetHash(node) assert.Equal(t, h1, h2) } func TestNode_decodeNodeLeafNode(t *testing.T) { t.Parallel() - marsh, hasher := getTestMarshAndHasher() - ln := getLn() - encNode, _ := marsh.Marshal(ln) + ln := getLn(getTestMarshAndHasher()) + encNode, _ := ln.marsh.Marshal(ln) encNode = append(encNode, leaf) - node, err := decodeNode(encNode, marsh) + node, err := decodeNode(encNode, ln.marsh, ln.hasher) assert.Nil(t, err) ln.dirty = false - h1, _ := encodeNodeAndGetHash(ln, marsh, hasher) - h2, _ := encodeNodeAndGetHash(node, marsh, hasher) + h1, _ := encodeNodeAndGetHash(ln) + h2, _ := encodeNodeAndGetHash(node) assert.Equal(t, h1, h2) } func TestNode_decodeNodeInvalidNode(t *testing.T) { t.Parallel() - marsh, _ := getTestMarshAndHasher() - ln := getLn() - encNode, _ := marsh.Marshal(ln) - encNode = append(encNode, 6) + ln := getLn(getTestMarshAndHasher()) + invalidNode := byte(6) + + encNode, _ := ln.marsh.Marshal(ln) + encNode = append(encNode, invalidNode) - node, err := decodeNode(encNode, marsh) + node, err := decodeNode(encNode, ln.marsh, ln.hasher) assert.Nil(t, node) assert.Equal(t, ErrInvalidNode, err) } func TestNode_decodeNodeInvalidEncoding(t *testing.T) { t.Parallel() - marsh, _ := getTestMarshAndHasher() + marsh, hasher := getTestMarshAndHasher() var encNode []byte - node, err := decodeNode(encNode, marsh) + node, err := decodeNode(encNode, marsh, hasher) assert.Nil(t, node) assert.Equal(t, ErrInvalidEncoding, err) } func TestNode_getEmptyNodeOfTypeBranchNode(t *testing.T) { t.Parallel() + bn, err := getEmptyNodeOfType(branch) assert.Nil(t, err) assert.IsType(t, &branchNode{}, bn) @@ -357,6 +372,7 @@ func TestNode_getEmptyNodeOfTypeBranchNode(t *testing.T) { func TestNode_getEmptyNodeOfTypeExtensionNode(t *testing.T) { t.Parallel() + en, err := getEmptyNodeOfType(extension) assert.Nil(t, err) assert.IsType(t, &extensionNode{}, en) @@ -364,6 +380,7 @@ func TestNode_getEmptyNodeOfTypeExtensionNode(t *testing.T) { func TestNode_getEmptyNodeOfTypeLeafNode(t *testing.T) { t.Parallel() + ln, err := getEmptyNodeOfType(leaf) assert.Nil(t, err) assert.IsType(t, &leafNode{}, ln) @@ -371,6 +388,7 @@ func TestNode_getEmptyNodeOfTypeLeafNode(t *testing.T) { func TestNode_getEmptyNodeOfTypeWrongNode(t *testing.T) { t.Parallel() + n, err := getEmptyNodeOfType(6) assert.Equal(t, ErrInvalidNode, err) assert.Nil(t, n) @@ -378,14 +396,19 @@ func TestNode_getEmptyNodeOfTypeWrongNode(t *testing.T) { func TestNode_childPosOutOfRange(t *testing.T) { t.Parallel() + assert.True(t, childPosOutOfRange(17)) assert.False(t, childPosOutOfRange(5)) } func TestMarshalingAndUnmarshalingWithCapnp(t *testing.T) { - _, collapsedBn := getBnAndCollapsedBn() - marsh := marshal.CapnpMarshalizer{} - bn := newBranchNode() + _, hasher := getTestMarshAndHasher() + marsh := &marshal.CapnpMarshalizer{} + + _, collapsedBn := getBnAndCollapsedBn(marsh, hasher) + collapsedBn.dirty = false + + bn, _ := newBranchNode(marsh, hasher) encBn, err := marsh.Marshal(collapsedBn) assert.Nil(t, err) @@ -398,6 +421,7 @@ func 
TestMarshalingAndUnmarshalingWithCapnp(t *testing.T) { func TestKeyBytesToHex(t *testing.T) { t.Parallel() + var test = []struct { key, hex []byte }{ @@ -437,6 +461,7 @@ func TestHexToKeyBytesInvalidLength(t *testing.T) { func TestPrefixLen(t *testing.T) { t.Parallel() + var test = []struct { a, b []byte length int @@ -455,17 +480,154 @@ func TestPrefixLen(t *testing.T) { } } -func TestPatriciaMerkleTrie_GetAllLeafsCollapsedTrie(t *testing.T) { +func TestGetOldHashesIfNodeIsCollapsed(t *testing.T) { t.Parallel() - db, _ := mock.NewMemDbMock() - marshalizer, hasher := getTestMarshAndHasher() - tr, _ := NewTrie(db, marshalizer, hasher) + tr := initTrie() + + rootHash, _ := tr.Root() + rootKey := []byte{6, 4, 6, 15, 6} + nextNode, _, _ := tr.root.getNext(rootKey, tr.Database()) - _ = tr.Update([]byte("doe"), []byte("reindeer")) - _ = tr.Update([]byte("dog"), []byte("puppy")) - _ = tr.Update([]byte("dogglesworth"), []byte("cat")) + _ = tr.Commit() + + tr.root = &extensionNode{ + CollapsedEn: protobuf.CollapsedEn{ + Key: rootKey, + EncodedChild: nextNode.getHash(), + }, + child: nil, + baseNode: &baseNode{ + hash: rootHash, + dirty: false, + marsh: tr.marshalizer, + hasher: tr.hasher, + }, + } + _ = tr.Update([]byte("doeee"), []byte("value of doeee")) + + assert.Equal(t, 3, len(tr.oldHashes)) +} + +func TestClearOldHashesAndOldRootOnCommit(t *testing.T) { + t.Parallel() + + tr := initTrie() + _ = tr.Commit() + root, _ := tr.Root() + + _ = tr.Update([]byte("doeee"), []byte("value of doeee")) + + assert.Equal(t, 3, len(tr.oldHashes)) + assert.Equal(t, root, tr.oldRoot) + + _ = tr.Commit() + + assert.Equal(t, 0, len(tr.oldHashes)) + assert.Equal(t, 0, len(tr.oldRoot)) +} + +func TestTrieResetOldHashes(t *testing.T) { + t.Parallel() + + tr := initTrie() + _ = tr.Commit() + + _ = tr.Update([]byte("doeee"), []byte("value of doeee")) + + assert.NotEqual(t, 0, len(tr.oldHashes)) + assert.NotEqual(t, 0, len(tr.oldRoot)) + + expectedHashes := tr.oldHashes + hashes := tr.ResetOldHashes() + assert.Equal(t, expectedHashes, hashes) + assert.Equal(t, 0, len(tr.oldHashes)) + assert.Equal(t, 0, len(tr.oldRoot)) +} + +func TestTrieAddHashesToOldHashes(t *testing.T) { + t.Parallel() + + hashes := [][]byte{[]byte("one"), []byte("two"), []byte("three")} + tr := initTrie() + _ = tr.Commit() + + _ = tr.Update([]byte("doeee"), []byte("value of doeee")) + + expectedHLength := len(tr.oldHashes) + len(hashes) + tr.AppendToOldHashes(hashes) + assert.Equal(t, expectedHLength, len(tr.oldHashes)) +} + +func TestNode_getDirtyHashes(t *testing.T) { + t.Parallel() + + tr := initTrie() + + hashes, err := tr.root.getDirtyHashes() + assert.Nil(t, err) + assert.NotNil(t, hashes) + assert.Equal(t, 6, len(hashes)) +} + +func TestPruningAndPruningCancellingOnTrieRollback(t *testing.T) { + t.Parallel() + + testVals := []struct { + key []byte + value []byte + }{ + {[]byte("doe"), []byte("reindeer")}, + {[]byte("dog"), []byte("puppy")}, + {[]byte("dogglesworth"), []byte("cat")}, + {[]byte("horse"), []byte("stallion")}, + } + + tr, _, _ := newEmptyTrie() + + rootHashes := make([][]byte, 0) + for _, testVal := range testVals { + _ = tr.Update(testVal.key, testVal.value) + _ = tr.Commit() + rootHashes = append(rootHashes, tr.root.getHash()) + } + + for i := 0; i < len(rootHashes); i++ { + _, err := tr.Recreate(rootHashes[i]) + assert.Nil(t, err) + } + + tr.CancelPrune(rootHashes[0], data.NewRoot) + finalizeTrieState(t, 1, tr, rootHashes) + finalizeTrieState(t, 2, tr, rootHashes) + rollbackTrieState(t, 3, tr, rootHashes) + + _, err := 
tr.Recreate(rootHashes[2]) + assert.Nil(t, err) +} + +func finalizeTrieState(t *testing.T, index int, tr data.Trie, rootHashes [][]byte) { + err := tr.Prune(rootHashes[index-1], data.OldRoot) + assert.Nil(t, err) + tr.CancelPrune(rootHashes[index], data.NewRoot) + + _, err = tr.Recreate(rootHashes[index-1]) + assert.NotNil(t, err) +} + +func rollbackTrieState(t *testing.T, index int, tr data.Trie, rootHashes [][]byte) { + err := tr.Prune(rootHashes[index], data.NewRoot) + assert.Nil(t, err) + tr.CancelPrune(rootHashes[index-1], data.OldRoot) + + _, err = tr.Recreate(rootHashes[index]) + assert.NotNil(t, err) +} + +func TestPatriciaMerkleTrie_GetAllLeafsCollapsedTrie(t *testing.T) { + t.Parallel() + tr := initTrie() _ = tr.Commit() root, _ := tr.root.(*extensionNode) diff --git a/data/trie/patriciaMerkleTrie.go b/data/trie/patriciaMerkleTrie.go index 4f52980e7f0..66c9010259b 100644 --- a/data/trie/patriciaMerkleTrie.go +++ b/data/trie/patriciaMerkleTrie.go @@ -2,56 +2,85 @@ package trie import ( "bytes" + "encoding/hex" "fmt" "sync" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/marshal" ) +var log = logger.GetOrCreate("trie") + const ( extension = iota leaf branch ) +const maxSnapshots = 2 + var emptyTrieHash = make([]byte, 32) type patriciaMerkleTrie struct { - root node - db data.DBWriteCacher + root node + + trieStorage data.StorageManager marshalizer marshal.Marshalizer hasher hashing.Hasher mutOperation sync.RWMutex + + oldHashes [][]byte + oldRoot []byte } // NewTrie creates a new Patricia Merkle Trie -func NewTrie(db data.DBWriteCacher, msh marshal.Marshalizer, hsh hashing.Hasher) (*patriciaMerkleTrie, error) { - if db == nil || db.IsInterfaceNil() { - return nil, ErrNilDatabase +func NewTrie( + trieStorage data.StorageManager, + msh marshal.Marshalizer, + hsh hashing.Hasher, +) (*patriciaMerkleTrie, error) { + if check.IfNil(trieStorage) { + return nil, ErrNilTrieStorage } - if msh == nil || msh.IsInterfaceNil() { + if check.IfNil(msh) { return nil, ErrNilMarshalizer } - if hsh == nil || hsh.IsInterfaceNil() { + if check.IfNil(hsh) { return nil, ErrNilHasher } - return &patriciaMerkleTrie{db: db, marshalizer: msh, hasher: hsh}, nil + + return &patriciaMerkleTrie{ + trieStorage: trieStorage, + marshalizer: msh, + hasher: hsh, + oldHashes: make([][]byte, 0), + oldRoot: make([]byte, 0), + }, nil } // Get starts at the root and searches for the given key. // If the key is present in the tree, it returns the corresponding value func (tr *patriciaMerkleTrie) Get(key []byte) ([]byte, error) { - tr.mutOperation.RLock() - defer tr.mutOperation.RUnlock() + tr.mutOperation.Lock() + defer tr.mutOperation.Unlock() if tr.root == nil { return nil, nil } hexKey := keyBytesToHex(key) - return tr.root.tryGet(hexKey, tr.db, tr.marshalizer) + val, err := tr.root.tryGet(hexKey, tr.trieStorage.Database()) + if err != nil { + err = fmt.Errorf("trie get error: %w, for key %v", err, hex.EncodeToString(key)) + return nil, err + } + + return val, nil } // Update updates the value at the given key. 
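With the constructor change above, callers no longer pass a raw DBWriteCacher to NewTrie; they build a data.StorageManager first. A minimal construction sketch, mirroring the getDefaultTrieParameters helper added to patriciaMerkleTrie_test.go further down in this patch; it is illustrative only, assumes that file's imports (config, mock, storageUnit, trie, io/ioutil), elides error handling as the helper does, and uses a throwaway temporary directory:

func exampleNewTrie() (data.Trie, error) {
    marshalizer := &mock.ProtobufMarshalizerMock{}
    hasher := &mock.KeccakMock{}

    // The storage manager now owns the trie database and the eviction waiting
    // list used for pruning, instead of the trie holding a DB directly.
    tempDir, _ := ioutil.TempDir("", "trie-example")
    cfg := &config.DBConfig{
        FilePath:          tempDir,
        Type:              string(storageUnit.LvlDbSerial),
        BatchDelaySeconds: 1,
        MaxBatchSize:      1,
        MaxOpenFiles:      10,
    }

    evictionWaitingList, _ := mock.NewEvictionWaitingList(100, mock.NewMemDbMock(), marshalizer)
    trieStorage, _ := trie.NewTrieStorageManager(mock.NewMemDbMock(), cfg, evictionWaitingList)

    return trie.NewTrie(trieStorage, marshalizer, hasher)
}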
@@ -62,27 +91,51 @@ func (tr *patriciaMerkleTrie) Update(key, value []byte) error { defer tr.mutOperation.Unlock() hexKey := keyBytesToHex(key) - node := newLeafNode(hexKey, value) + newLn, err := newLeafNode(hexKey, value, tr.marshalizer, tr.hasher) + if err != nil { + return err + } + + var newRoot node + var oldHashes [][]byte if len(value) != 0 { if tr.root == nil { - tr.root = newLeafNode(hexKey, value) + newRoot, err = newLeafNode(hexKey, value, tr.marshalizer, tr.hasher) + if err != nil { + return err + } + + tr.root = newRoot return nil } - _, newRoot, err := tr.root.insert(node, tr.db, tr.marshalizer) + + if !tr.root.isDirty() { + tr.oldRoot = tr.root.getHash() + } + + _, newRoot, oldHashes, err = tr.root.insert(newLn, tr.trieStorage.Database()) if err != nil { return err } tr.root = newRoot + tr.oldHashes = append(tr.oldHashes, oldHashes...) } else { if tr.root == nil { return nil } - _, newRoot, err := tr.root.delete(hexKey, tr.db, tr.marshalizer) + + if !tr.root.isDirty() { + tr.oldRoot = tr.root.getHash() + } + + _, newRoot, oldHashes, err = tr.root.delete(hexKey, tr.trieStorage.Database()) if err != nil { return err } tr.root = newRoot + tr.oldHashes = append(tr.oldHashes, oldHashes...) } + return nil } @@ -95,18 +148,25 @@ func (tr *patriciaMerkleTrie) Delete(key []byte) error { if tr.root == nil { return nil } - _, newRoot, err := tr.root.delete(hexKey, tr.db, tr.marshalizer) + + if !tr.root.isDirty() { + tr.oldRoot = tr.root.getHash() + } + + _, newRoot, oldHashes, err := tr.root.delete(hexKey, tr.trieStorage.Database()) if err != nil { return err } tr.root = newRoot + tr.oldHashes = append(tr.oldHashes, oldHashes...) + return nil } // Root returns the hash of the root node func (tr *patriciaMerkleTrie) Root() ([]byte, error) { - tr.mutOperation.RLock() - defer tr.mutOperation.RUnlock() + tr.mutOperation.Lock() + defer tr.mutOperation.Unlock() if tr.root == nil { return emptyTrieHash, nil @@ -116,7 +176,7 @@ func (tr *patriciaMerkleTrie) Root() ([]byte, error) { if hash != nil { return hash, nil } - err := tr.root.setRootHash(tr.marshalizer, tr.hasher) + err := tr.root.setRootHash() if err != nil { return nil, err } @@ -125,8 +185,8 @@ func (tr *patriciaMerkleTrie) Root() ([]byte, error) { // Prove returns the Merkle proof for the given key func (tr *patriciaMerkleTrie) Prove(key []byte) ([][]byte, error) { - tr.mutOperation.RLock() - defer tr.mutOperation.RUnlock() + tr.mutOperation.Lock() + defer tr.mutOperation.Unlock() if tr.root == nil { return nil, ErrNilNode @@ -134,25 +194,26 @@ func (tr *patriciaMerkleTrie) Prove(key []byte) ([][]byte, error) { var proof [][]byte hexKey := keyBytesToHex(key) - node := tr.root + n := tr.root - err := node.setRootHash(tr.marshalizer, tr.hasher) + err := n.setRootHash() if err != nil { return nil, err } + var encNode []byte for { - encNode, err := node.getEncodedNode(tr.marshalizer) + encNode, err = n.getEncodedNode() if err != nil { return nil, err } proof = append(proof, encNode) - node, hexKey, err = node.getNext(hexKey, tr.db, tr.marshalizer) + n, hexKey, err = n.getNext(hexKey, tr.trieStorage.Database()) if err != nil { return nil, err } - if node == nil { + if n == nil { return proof, nil } } @@ -160,14 +221,14 @@ func (tr *patriciaMerkleTrie) Prove(key []byte) ([][]byte, error) { // VerifyProof checks Merkle proofs. 
func (tr *patriciaMerkleTrie) VerifyProof(proofs [][]byte, key []byte) (bool, error) { - tr.mutOperation.RLock() - defer tr.mutOperation.RUnlock() - wantHash, err := tr.Root() if err != nil { return false, err } + tr.mutOperation.Lock() + defer tr.mutOperation.Unlock() + key = keyBytesToHex(key) for i := range proofs { encNode := proofs[i] @@ -180,7 +241,8 @@ func (tr *patriciaMerkleTrie) VerifyProof(proofs [][]byte, key []byte) (bool, er return false, nil } - n, err := decodeNode(encNode, tr.marshalizer) + var n node + n, err = decodeNode(encNode, tr.marshalizer, tr.hasher) if err != nil { return false, err } @@ -215,11 +277,36 @@ func (tr *patriciaMerkleTrie) Commit() error { if tr.root.isCollapsed() { return nil } - err := tr.root.setRootHash(tr.marshalizer, tr.hasher) + err := tr.root.setRootHash() if err != nil { return err } - err = tr.root.commit(0, tr.db, tr.marshalizer, tr.hasher) + + newRoot := tr.root.getHash() + newHashes, err := tr.root.getDirtyHashes() + if err != nil { + return err + } + + if len(newHashes) > 0 && len(newRoot) > 0 { + newRoot = append(newRoot, byte(data.NewRoot)) + err = tr.trieStorage.MarkForEviction(newRoot, newHashes) + if err != nil { + return err + } + } + + if len(tr.oldHashes) > 0 && len(tr.oldRoot) > 0 { + tr.oldRoot = append(tr.oldRoot, byte(data.OldRoot)) + err = tr.trieStorage.MarkForEviction(tr.oldRoot, tr.oldHashes) + if err != nil { + return err + } + tr.oldRoot = make([]byte, 0) + tr.oldHashes = make([][]byte, 0) + } + + err = tr.root.commit(false, 0, tr.trieStorage.Database(), tr.trieStorage.Database()) if err != nil { return err } @@ -231,26 +318,21 @@ func (tr *patriciaMerkleTrie) Recreate(root []byte) (data.Trie, error) { tr.mutOperation.Lock() defer tr.mutOperation.Unlock() - newTr, err := NewTrie(tr.db, tr.marshalizer, tr.hasher) - if err != nil { - return nil, err - } - if emptyTrie(root) { - return newTr, nil - } - - encRoot, err := tr.db.Get(root) - if err != nil { - return nil, err + clonedTrieStorage := tr.trieStorage.Clone() + return NewTrie( + clonedTrieStorage, + tr.marshalizer, + tr.hasher, + ) } - newRoot, err := decodeNode(encRoot, tr.marshalizer) + newTr, err := tr.recreateFromDb(root) if err != nil { + err = fmt.Errorf("trie recreate error: %w, for root %v", err, core.ToB64(root)) return nil, err } - newTr.root = newRoot return newTr, nil } @@ -259,7 +341,12 @@ func (tr *patriciaMerkleTrie) DeepClone() (data.Trie, error) { tr.mutOperation.Lock() defer tr.mutOperation.Unlock() - clonedTrie, err := NewTrie(tr.db, tr.marshalizer, tr.hasher) + clonedTrieStorage := tr.trieStorage.Clone() + clonedTrie, err := NewTrie( + clonedTrieStorage, + tr.marshalizer, + tr.hasher, + ) if err != nil { return nil, err } @@ -288,14 +375,10 @@ func (tr *patriciaMerkleTrie) String() string { // IsInterfaceNil returns true if there is no value under the interface func (tr *patriciaMerkleTrie) IsInterfaceNil() bool { - if tr == nil { - return true - } - return false + return tr == nil } func emptyTrie(root []byte) bool { - if bytes.Equal(root, make([]byte, 0)) { return true } @@ -305,6 +388,131 @@ func emptyTrie(root []byte) bool { return false } +// Prune removes from the database all the old hashes that correspond to the given root hash +func (tr *patriciaMerkleTrie) Prune(rootHash []byte, identifier data.TriePruningIdentifier) error { + tr.mutOperation.Lock() + defer tr.mutOperation.Unlock() + + rootHash = append(rootHash, byte(identifier)) + return tr.trieStorage.Prune(rootHash) +} + +// CancelPrune invalidates the hashes that correspond to the 
given root hash from the eviction waiting list +func (tr *patriciaMerkleTrie) CancelPrune(rootHash []byte, identifier data.TriePruningIdentifier) { + tr.mutOperation.Lock() + rootHash = append(rootHash, byte(identifier)) + tr.trieStorage.CancelPrune(rootHash) + tr.mutOperation.Unlock() +} + +// AppendToOldHashes appends the given hashes to the trie's oldHashes variable +func (tr *patriciaMerkleTrie) AppendToOldHashes(hashes [][]byte) { + tr.mutOperation.Lock() + tr.oldHashes = append(tr.oldHashes, hashes...) + tr.mutOperation.Unlock() +} + +// ResetOldHashes resets the oldHashes and oldRoot variables and returns the old hashes +func (tr *patriciaMerkleTrie) ResetOldHashes() [][]byte { + tr.mutOperation.Lock() + oldHashes := tr.oldHashes + tr.oldHashes = make([][]byte, 0) + tr.oldRoot = make([]byte, 0) + tr.mutOperation.Unlock() + + return oldHashes +} + +// SetCheckpoint adds the current state of the trie to the snapshot database +func (tr *patriciaMerkleTrie) SetCheckpoint(rootHash []byte) { + tr.trieStorage.SetCheckpoint(rootHash, tr.marshalizer, tr.hasher) +} + +// TakeSnapshot creates a new database in which the current state of the trie is saved. +// If the maximum number of snapshots has been reached, the oldest snapshot is removed. +func (tr *patriciaMerkleTrie) TakeSnapshot(rootHash []byte) { + tr.trieStorage.TakeSnapshot(rootHash, tr.marshalizer, tr.hasher) +} + +// Database returns the trie database +func (tr *patriciaMerkleTrie) Database() data.DBWriteCacher { + return tr.trieStorage.Database() +} + +func (tr *patriciaMerkleTrie) recreateFromDb(rootHash []byte) (data.Trie, error) { + db := tr.trieStorage.GetDbThatContainsHash(rootHash) + if db == nil { + return nil, ErrHashNotFound + } + + clonedTrieStorage := tr.trieStorage.Clone() + newTr, err := NewTrie( + clonedTrieStorage, + tr.marshalizer, + tr.hasher, + ) + if err != nil { + return nil, err + } + newTr.trieStorage.SetDatabase(db) + + newRoot, err := getNodeFromDBAndDecode(rootHash, db, tr.marshalizer, tr.hasher) + if err != nil { + return nil, err + } + + newRoot.setGivenHash(rootHash) + newTr.root = newRoot + return newTr, nil +} + +// GetSerializedNodes returns a batch of serialized nodes from the trie, starting from the given hash +func (tr *patriciaMerkleTrie) GetSerializedNodes(rootHash []byte, maxBuffToSend uint64) ([][]byte, error) { + tr.mutOperation.Lock() + defer tr.mutOperation.Unlock() + + size := uint64(0) + + newTr, err := tr.recreateFromDb(rootHash) + if err != nil { + return nil, err + } + + it, err := NewIterator(newTr) + if err != nil { + return nil, err + } + + encNode, err := it.MarshalizedNode() + if err != nil { + return nil, err + } + + nodes := make([][]byte, 0) + nodes = append(nodes, encNode) + size += uint64(len(encNode)) + + for it.HasNext() { + err = it.Next() + if err != nil { + return nil, err + } + + encNode, err = it.MarshalizedNode() + if err != nil { + return nil, err + } + + if size+uint64(len(encNode)) > maxBuffToSend { + return nodes, nil + } + nodes = append(nodes, encNode) + size += uint64(len(encNode)) + } + + return nodes, nil +} + // GetAllLeaves iterates the trie and returns a map that contains all leafNodes information func (tr *patriciaMerkleTrie) GetAllLeaves() (map[string][]byte, error) { if tr.root == nil { @@ -312,10 +520,15 @@ func (tr *patriciaMerkleTrie) GetAllLeaves() (map[string][]byte, error) { } leaves := make(map[string][]byte) - err := tr.root.getAllLeaves(leaves, []byte{}, tr.db, tr.marshalizer) + err := tr.root.getAllLeaves(leaves, []byte{}, tr.Database(), 
tr.marshalizer) if err != nil { return nil, err } return leaves, nil } + +// IsPruningEnabled returns true if state pruning is enabled +func (tr *patriciaMerkleTrie) IsPruningEnabled() bool { + return tr.trieStorage.IsPruningEnabled() +} diff --git a/data/trie/patriciaMerkleTrie_test.go b/data/trie/patriciaMerkleTrie_test.go index a70d65e4374..5ef1cd5f180 100644 --- a/data/trie/patriciaMerkleTrie_test.go +++ b/data/trie/patriciaMerkleTrie_test.go @@ -1,23 +1,57 @@ package trie_test import ( + "encoding/base64" + "fmt" + "io/ioutil" + "math/rand" "strconv" + "sync" "testing" + "time" + "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/mock" "github.com/ElrondNetwork/elrond-go/data/trie" + "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/hashing/keccak" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/stretchr/testify/assert" ) -var marshalizer = &mock.ProtobufMarshalizerMock{} -var hasher = &mock.KeccakMock{} var emptyTrieHash = make([]byte, 32) +func emptyTrie() data.Trie { + tr, _ := trie.NewTrie(getDefaultTrieParameters()) + + return tr +} + +func getDefaultTrieParameters() (data.StorageManager, marshal.Marshalizer, hashing.Hasher) { + db := mock.NewMemDbMock() + marshalizer := &mock.ProtobufMarshalizerMock{} + hasher := &mock.KeccakMock{} + + tempDir, _ := ioutil.TempDir("", strconv.Itoa(rand.Intn(100000))) + + cfg := &config.DBConfig{ + FilePath: tempDir, + Type: string(storageUnit.LvlDbSerial), + BatchDelaySeconds: 1, + MaxBatchSize: 1, + MaxOpenFiles: 10, + } + + evictionWaitingList, _ := mock.NewEvictionWaitingList(100, mock.NewMemDbMock(), marshalizer) + trieStorageManager, _ := trie.NewTrieStorageManager(db, cfg, evictionWaitingList) + + return trieStorageManager, marshalizer, hasher +} + func initTrieMultipleValues(nr int) (data.Trie, [][]byte) { - db, _ := mock.NewMemDbMock() - tr, _ := trie.NewTrie(db, marshalizer, hasher) + tr := emptyTrie() var values [][]byte hsh := keccak.Keccak{} @@ -31,9 +65,7 @@ func initTrieMultipleValues(nr int) (data.Trie, [][]byte) { } func initTrie() data.Trie { - db, _ := mock.NewMemDbMock() - tr, _ := trie.NewTrie(db, marshalizer, hasher) - + tr := emptyTrie() _ = tr.Update([]byte("doe"), []byte("reindeer")) _ = tr.Update([]byte("dog"), []byte("puppy")) _ = tr.Update([]byte("dogglesworth"), []byte("cat")) @@ -41,30 +73,39 @@ func initTrie() data.Trie { return tr } -func TestNewTrieWithNilDB(t *testing.T) { +func TestNewTrieWithNilTrieStorage(t *testing.T) { + t.Parallel() + + _, marshalizer, hasher := getDefaultTrieParameters() tr, err := trie.NewTrie(nil, marshalizer, hasher) assert.Nil(t, tr) - assert.NotNil(t, err) + assert.Equal(t, trie.ErrNilTrieStorage, err) } func TestNewTrieWithNilMarshalizer(t *testing.T) { - db, _ := mock.NewMemDbMock() - tr, err := trie.NewTrie(db, nil, hasher) + t.Parallel() + + trieStorage, _, hasher := getDefaultTrieParameters() + tr, err := trie.NewTrie(trieStorage, nil, hasher) assert.Nil(t, tr) - assert.NotNil(t, err) + assert.Equal(t, trie.ErrNilMarshalizer, err) } func TestNewTrieWithNilHasher(t *testing.T) { - db, _ := mock.NewMemDbMock() - tr, err := trie.NewTrie(db, marshalizer, nil) + t.Parallel() + + trieStorage, marshalizer, _ := getDefaultTrieParameters() + tr, err := trie.NewTrie(trieStorage, marshalizer, nil) assert.Nil(t, tr) - assert.NotNil(t, err) + assert.Equal(t, trie.ErrNilHasher, err) } func TestPatriciaMerkleTree_Get(t 
*testing.T) { + t.Parallel() + tr, val := initTrieMultipleValues(10000) for i := range val { @@ -74,8 +115,9 @@ func TestPatriciaMerkleTree_Get(t *testing.T) { } func TestPatriciaMerkleTree_GetEmptyTrie(t *testing.T) { - db, _ := mock.NewMemDbMock() - tr, _ := trie.NewTrie(db, marshalizer, hasher) + t.Parallel() + + tr := emptyTrie() val, err := tr.Get([]byte("dog")) assert.Nil(t, err) @@ -83,6 +125,8 @@ func TestPatriciaMerkleTree_GetEmptyTrie(t *testing.T) { } func TestPatriciaMerkleTree_Update(t *testing.T) { + t.Parallel() + tr := initTrie() newVal := []byte("doge") @@ -93,6 +137,8 @@ func TestPatriciaMerkleTree_Update(t *testing.T) { } func TestPatriciaMerkleTree_UpdateEmptyVal(t *testing.T) { + t.Parallel() + tr := initTrie() var empty []byte @@ -103,6 +149,8 @@ func TestPatriciaMerkleTree_UpdateEmptyVal(t *testing.T) { } func TestPatriciaMerkleTree_UpdateNotExisting(t *testing.T) { + t.Parallel() + tr := initTrie() _ = tr.Update([]byte("does"), []byte("this")) @@ -112,6 +160,8 @@ func TestPatriciaMerkleTree_UpdateNotExisting(t *testing.T) { } func TestPatriciaMerkleTree_Delete(t *testing.T) { + t.Parallel() + tr := initTrie() var empty []byte @@ -122,14 +172,17 @@ func TestPatriciaMerkleTree_Delete(t *testing.T) { } func TestPatriciaMerkleTree_DeleteEmptyTrie(t *testing.T) { - db, _ := mock.NewMemDbMock() - tr, _ := trie.NewTrie(db, marshalizer, hasher) + t.Parallel() + + tr := emptyTrie() err := tr.Delete([]byte("dog")) assert.Nil(t, err) } func TestPatriciaMerkleTree_Root(t *testing.T) { + t.Parallel() + tr := initTrie() root, err := tr.Root() @@ -138,8 +191,9 @@ func TestPatriciaMerkleTree_Root(t *testing.T) { } func TestPatriciaMerkleTree_NilRoot(t *testing.T) { - db, _ := mock.NewMemDbMock() - tr, _ := trie.NewTrie(db, marshalizer, hasher) + t.Parallel() + + tr := emptyTrie() root, err := tr.Root() assert.Nil(t, err) @@ -147,6 +201,8 @@ func TestPatriciaMerkleTree_NilRoot(t *testing.T) { } func TestPatriciaMerkleTree_Prove(t *testing.T) { + t.Parallel() + tr := initTrie() proof, err := tr.Prove([]byte("dog")) @@ -156,6 +212,8 @@ func TestPatriciaMerkleTree_Prove(t *testing.T) { } func TestPatriciaMerkleTree_ProveCollapsedTrie(t *testing.T) { + t.Parallel() + tr := initTrie() _ = tr.Commit() @@ -166,8 +224,9 @@ func TestPatriciaMerkleTree_ProveCollapsedTrie(t *testing.T) { } func TestPatriciaMerkleTree_ProveOnEmptyTrie(t *testing.T) { - db, _ := mock.NewMemDbMock() - tr, _ := trie.NewTrie(db, marshalizer, hasher) + t.Parallel() + + tr := emptyTrie() proof, err := tr.Prove([]byte("dog")) assert.Nil(t, proof) @@ -175,6 +234,8 @@ func TestPatriciaMerkleTree_ProveOnEmptyTrie(t *testing.T) { } func TestPatriciaMerkleTree_VerifyProof(t *testing.T) { + t.Parallel() + tr, val := initTrieMultipleValues(50) for i := range val { @@ -192,6 +253,8 @@ func TestPatriciaMerkleTree_VerifyProof(t *testing.T) { } func TestPatriciaMerkleTree_VerifyProofNilProofs(t *testing.T) { + t.Parallel() + tr := initTrie() ok, err := tr.VerifyProof(nil, []byte("dog")) @@ -200,6 +263,8 @@ func TestPatriciaMerkleTree_VerifyProofNilProofs(t *testing.T) { } func TestPatriciaMerkleTree_VerifyProofEmptyProofs(t *testing.T) { + t.Parallel() + tr := initTrie() ok, err := tr.VerifyProof([][]byte{}, []byte("dog")) @@ -208,6 +273,8 @@ func TestPatriciaMerkleTree_VerifyProofEmptyProofs(t *testing.T) { } func TestPatriciaMerkleTree_Consistency(t *testing.T) { + t.Parallel() + tr := initTrie() root1, _ := tr.Root() @@ -221,7 +288,47 @@ func TestPatriciaMerkleTree_Consistency(t *testing.T) { assert.NotEqual(t, root1, root2) 
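As a side note for readers of this patch, here is a minimal sketch (not part of the diff) of how the new storage-manager-based constructor and the pruning flow fit together. It only reuses identifiers introduced or exercised in this patch (NewTrieStorageManager, NewTrie, Commit, Prune, data.OldRoot) and mirrors getDefaultTrieParameters and TestPatriciaMerkleTrie_Prune; imports are the same as in patriciaMerkleTrie_test.go, and the wrapper function name and temp directory are placeholders.

func exampleCommitAndPrune() {
	db := mock.NewMemDbMock()
	msh := &mock.ProtobufMarshalizerMock{}
	hsh := &mock.KeccakMock{}

	tempDir, _ := ioutil.TempDir("", "trie_example")
	cfg := &config.DBConfig{
		FilePath:          tempDir,
		Type:              string(storageUnit.LvlDbSerial),
		BatchDelaySeconds: 1,
		MaxBatchSize:      1,
		MaxOpenFiles:      10,
	}

	ewl, _ := mock.NewEvictionWaitingList(100, mock.NewMemDbMock(), msh)
	trieStorage, _ := trie.NewTrieStorageManager(db, cfg, ewl)
	tr, _ := trie.NewTrie(trieStorage, msh, hsh)

	_ = tr.Update([]byte("dog"), []byte("puppy"))
	_ = tr.Commit() // marks the dirty hashes for eviction under the new root
	oldRootHash, _ := tr.Root()

	_ = tr.Update([]byte("dog"), []byte("value of dog"))
	_ = tr.Commit()

	// removes from the database the nodes referenced only by the old root
	_ = tr.Prune(oldRootHash, data.OldRoot)
}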
} +func TestPatriciaMerkleTrie_UpdateAndGetConcurrently(t *testing.T) { + t.Parallel() + + tr := emptyTrie() + nrInserts := 100 + wg := &sync.WaitGroup{} + wg.Add(nrInserts) + + for i := 0; i < nrInserts; i++ { + go func(index int) { + err := tr.Update([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index))) + assert.Nil(t, err) + + val, err := tr.Get([]byte(strconv.Itoa(index))) + assert.Nil(t, err) + assert.Equal(t, []byte(strconv.Itoa(index)), val) + + wg.Done() + }(i) + } + wg.Wait() + + rootHash, _ := tr.Root() + assert.NotEqual(t, emptyTrieHash, rootHash) + + wg.Add(nrInserts) + for i := 0; i < nrInserts; i++ { + go func(index int) { + assert.Nil(t, tr.Delete([]byte(strconv.Itoa(index)))) + wg.Done() + }(i) + } + wg.Wait() + + rootHash, _ = tr.Root() + assert.Equal(t, emptyTrieHash, rootHash) +} + func TestPatriciaMerkleTree_Commit(t *testing.T) { + t.Parallel() + tr := initTrie() err := tr.Commit() @@ -229,6 +336,8 @@ func TestPatriciaMerkleTree_Commit(t *testing.T) { } func TestPatriciaMerkleTree_CommitCollapsesTrieOk(t *testing.T) { + t.Parallel() + tr := initTrie() _ = tr.Update([]byte("zebra"), []byte("zebra")) @@ -240,6 +349,8 @@ func TestPatriciaMerkleTree_CommitCollapsesTrieOk(t *testing.T) { } func TestPatriciaMerkleTree_CommitAfterCommit(t *testing.T) { + t.Parallel() + tr := initTrie() _ = tr.Commit() @@ -248,14 +359,17 @@ func TestPatriciaMerkleTree_CommitAfterCommit(t *testing.T) { } func TestPatriciaMerkleTree_CommitEmptyRoot(t *testing.T) { - db, _ := mock.NewMemDbMock() - tr, _ := trie.NewTrie(db, marshalizer, hasher) + t.Parallel() + + tr := emptyTrie() err := tr.Commit() assert.Nil(t, err) } func TestPatriciaMerkleTree_GetAfterCommit(t *testing.T) { + t.Parallel() + tr := initTrie() err := tr.Commit() @@ -267,6 +381,8 @@ func TestPatriciaMerkleTree_GetAfterCommit(t *testing.T) { } func TestPatriciaMerkleTree_InsertAfterCommit(t *testing.T) { + t.Parallel() + tr1 := initTrie() tr2 := initTrie() @@ -280,10 +396,11 @@ func TestPatriciaMerkleTree_InsertAfterCommit(t *testing.T) { root2, _ := tr2.Root() assert.Equal(t, root2, root1) - } func TestPatriciaMerkleTree_DeleteAfterCommit(t *testing.T) { + t.Parallel() + tr1 := initTrie() tr2 := initTrie() @@ -300,6 +417,8 @@ func TestPatriciaMerkleTree_DeleteAfterCommit(t *testing.T) { } func TestPatriciaMerkleTrie_Recreate(t *testing.T) { + t.Parallel() + tr := initTrie() rootHash, _ := tr.Root() _ = tr.Commit() @@ -313,6 +432,8 @@ func TestPatriciaMerkleTrie_Recreate(t *testing.T) { } func TestPatriciaMerkleTrie_RecreateWithInvalidRootHash(t *testing.T) { + t.Parallel() + tr := initTrie() newTr, err := tr.Recreate(nil) @@ -321,13 +442,9 @@ func TestPatriciaMerkleTrie_RecreateWithInvalidRootHash(t *testing.T) { assert.Equal(t, emptyTrieHash, root) } -func emptyTrie() data.Trie { - db, _ := mock.NewMemDbMock() - tr, _ := trie.NewTrie(db, marshalizer, hasher) - return tr -} - func TestPatriciaMerkleTrie_VerifyProofFromDifferentTrieShouldNotWork(t *testing.T) { + t.Parallel() + tr1 := emptyTrie() tr2 := emptyTrie() @@ -345,6 +462,8 @@ func TestPatriciaMerkleTrie_VerifyProofFromDifferentTrieShouldNotWork(t *testing } func TestPatriciaMerkleTrie_VerifyProofBranchNodeWantHashShouldWork(t *testing.T) { + t.Parallel() + tr := emptyTrie() _ = tr.Update([]byte("dog"), []byte("cat")) @@ -357,6 +476,8 @@ func TestPatriciaMerkleTrie_VerifyProofBranchNodeWantHashShouldWork(t *testing.T } func TestPatriciaMerkleTrie_VerifyProofExtensionNodeWantHashShouldWork(t *testing.T) { + t.Parallel() + tr := emptyTrie() _ = tr.Update([]byte("dog"), 
[]byte("cat")) @@ -387,6 +508,94 @@ func TestPatriciaMerkleTrie_DeepCloneShouldWork(t *testing.T) { assert.Equal(t, originalRoot, clonedTrie) } +func TestPatriciaMerkleTrie_PruneAfterCancelPruneShouldFail(t *testing.T) { + t.Parallel() + + tr := initTrie() + _ = tr.Commit() + rootHash, _ := tr.Root() + + _ = tr.Update([]byte("dog"), []byte("value of dog")) + _ = tr.Commit() + + tr.CancelPrune(rootHash, data.OldRoot) + + key := base64.StdEncoding.EncodeToString(append(rootHash, byte(data.OldRoot))) + err := fmt.Errorf("key: %s not found", key) + expectedErr := fmt.Errorf("trie storage manager prune error: %w, for root %v", err, key) + + err = tr.Prune(rootHash, data.OldRoot) + assert.Equal(t, expectedErr, err) +} + +func TestPatriciaMerkleTrie_Prune(t *testing.T) { + t.Parallel() + + tr, _ := trie.NewTrie(getDefaultTrieParameters()) + + _ = tr.Update([]byte("doe"), []byte("reindeer")) + _ = tr.Update([]byte("dog"), []byte("puppy")) + _ = tr.Update([]byte("dogglesworth"), []byte("cat")) + _ = tr.Commit() + rootHash, _ := tr.Root() + + _ = tr.Update([]byte("dog"), []byte("value of dog")) + _ = tr.Commit() + + _ = tr.Prune(rootHash, data.OldRoot) + + expectedErr := fmt.Errorf("key: %s not found", base64.StdEncoding.EncodeToString(rootHash)) + val, err := tr.Database().Get(rootHash) + assert.Nil(t, val) + assert.Equal(t, expectedErr, err) +} + +func TestPatriciaMerkleTrie_GetSerializedNodes(t *testing.T) { + t.Parallel() + + tr := initTrie() + _ = tr.Commit() + rootHash, _ := tr.Root() + + maxBuffToSend := uint64(500) + expectedNodes := 6 + serializedNodes, err := tr.GetSerializedNodes(rootHash, maxBuffToSend) + assert.Nil(t, err) + assert.Equal(t, expectedNodes, len(serializedNodes)) +} + +func TestPatriciaMerkleTrie_GetSerializedNodesTinyBufferShouldNotGetAllNodes(t *testing.T) { + t.Parallel() + + tr := initTrie() + _ = tr.Commit() + rootHash, _ := tr.Root() + + maxBuffToSend := uint64(150) + expectedNodes := 2 + serializedNodes, err := tr.GetSerializedNodes(rootHash, maxBuffToSend) + assert.Nil(t, err) + assert.Equal(t, expectedNodes, len(serializedNodes)) +} + +func TestPatriciaMerkleTrie_GetSerializedNodesGetFromSnapshot(t *testing.T) { + t.Parallel() + + tr := initTrie() + _ = tr.Commit() + rootHash, _ := tr.Root() + + tr.TakeSnapshot(rootHash) + time.Sleep(time.Second) + _ = tr.Prune(rootHash, data.NewRoot) + + maxBuffToSend := uint64(500) + expectedNodes := 6 + serializedNodes, err := tr.GetSerializedNodes(rootHash, maxBuffToSend) + assert.Nil(t, err) + assert.Equal(t, expectedNodes, len(serializedNodes)) +} + func TestPatriciaMerkleTrie_GetAllLeaves(t *testing.T) { t.Parallel() @@ -589,8 +798,8 @@ func BenchmarkPatriciaMerkleTree_Commit(b *testing.B) { b.StopTimer() hsh := keccak.Keccak{} tr := emptyTrie() - for i := 0; i < nrValuesInTrie; i++ { - hash := hsh.Compute(strconv.Itoa(i)) + for j := 0; j < nrValuesInTrie; j++ { + hash := hsh.Compute(strconv.Itoa(j)) _ = tr.Update(hash, hash) } b.StartTimer() diff --git a/data/trie/snapshotsQueue.go b/data/trie/snapshotsQueue.go new file mode 100644 index 00000000000..b2db7c1c0d6 --- /dev/null +++ b/data/trie/snapshotsQueue.go @@ -0,0 +1,71 @@ +package trie + +import ( + "sync" +) + +type snapshotsQueue struct { + queue []*snapshotsQueueEntry + mut sync.RWMutex +} + +type snapshotsQueueEntry struct { + rootHash []byte + newDb bool +} + +func newSnapshotsQueue() *snapshotsQueue { + return &snapshotsQueue{ + queue: make([]*snapshotsQueueEntry, 0), + } +} + +func (sq *snapshotsQueue) add(rootHash []byte, newDb bool) { + sq.mut.Lock() + 
newSnapshot := &snapshotsQueueEntry{ + rootHash: rootHash, + newDb: newDb, + } + sq.queue = append(sq.queue, newSnapshot) + sq.mut.Unlock() +} + +func (sq *snapshotsQueue) len() int { + sq.mut.Lock() + defer sq.mut.Unlock() + + return len(sq.queue) +} + +func (sq *snapshotsQueue) clone() snapshotsBuffer { + sq.mut.Lock() + + newQueue := make([]*snapshotsQueueEntry, len(sq.queue)) + for i := range newQueue { + newQueue[i] = &snapshotsQueueEntry{ + rootHash: sq.queue[i].rootHash, + newDb: sq.queue[i].newDb, + } + } + + sq.mut.Unlock() + + return &snapshotsQueue{queue: newQueue} +} + +func (sq *snapshotsQueue) getFirst() *snapshotsQueueEntry { + sq.mut.Lock() + defer sq.mut.Unlock() + + return sq.queue[0] +} + +func (sq *snapshotsQueue) removeFirst() { + sq.mut.Lock() + + if len(sq.queue) != 0 { + sq.queue = sq.queue[1:] + } + + sq.mut.Unlock() +} diff --git a/data/trie/snapshotsQueue_test.go b/data/trie/snapshotsQueue_test.go new file mode 100644 index 00000000000..dcc444b2f8d --- /dev/null +++ b/data/trie/snapshotsQueue_test.go @@ -0,0 +1,108 @@ +package trie + +import ( + "strconv" + "sync" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewSnapshotsQueue(t *testing.T) { + t.Parallel() + + assert.NotNil(t, newSnapshotsQueue()) +} + +func TestSnapshotsQueue_Add(t *testing.T) { + t.Parallel() + + sq := newSnapshotsQueue() + sq.add([]byte("root hash"), true) + + assert.Equal(t, 1, len(sq.queue)) + assert.Equal(t, []byte("root hash"), sq.queue[0].rootHash) + assert.True(t, sq.queue[0].newDb) +} + +func TestSnapshotsQueue_AddConcurrently(t *testing.T) { + sq := newSnapshotsQueue() + numSnapshots := 100 + + wg := sync.WaitGroup{} + wg.Add(numSnapshots) + + for i := 0; i < numSnapshots; i++ { + go func(index int) { + sq.add([]byte(strconv.Itoa(index)), true) + wg.Done() + }(i) + } + + wg.Wait() + + assert.Equal(t, numSnapshots, len(sq.queue)) +} + +func TestSnapshotsQueue_Len(t *testing.T) { + t.Parallel() + + sq := newSnapshotsQueue() + numSnapshots := 100 + + for i := 0; i < numSnapshots; i++ { + sq.add([]byte(strconv.Itoa(i)), true) + } + + assert.Equal(t, numSnapshots, sq.len()) +} + +func TestSnapshotsQueue_Clone(t *testing.T) { + t.Parallel() + + sq := newSnapshotsQueue() + sq.add([]byte("root hash"), true) + + newSq := sq.clone() + assert.Equal(t, sq.len(), newSq.len()) + + newSq, _ = newSq.(*snapshotsQueue) + assert.True(t, sq != newSq) + + sq.queue[0].newDb = false + assert.True(t, newSq.getFirst().newDb) + + sq.add([]byte("root hash1"), true) + assert.NotEqual(t, sq.len(), newSq.len()) +} + +func TestSnapshotsQueue_GetFirst(t *testing.T) { + t.Parallel() + + sq := newSnapshotsQueue() + numSnapshots := 10 + + for i := 0; i < numSnapshots; i++ { + sq.add([]byte(strconv.Itoa(i)), true) + } + + firstEntry := sq.getFirst() + assert.Equal(t, []byte(strconv.Itoa(0)), firstEntry.rootHash) + assert.True(t, firstEntry.newDb) +} + +func TestSnapshotsQueue_RemoveFirst(t *testing.T) { + t.Parallel() + + sq := newSnapshotsQueue() + numSnapshots := 2 + + for i := 0; i < numSnapshots; i++ { + sq.add([]byte(strconv.Itoa(i)), true) + } + + sq.removeFirst() + assert.False(t, sq.len() == 0) + sq.removeFirst() + assert.True(t, sq.len() == 0) +} diff --git a/data/trie/sync.go b/data/trie/sync.go new file mode 100644 index 00000000000..b6fa579415c --- /dev/null +++ b/data/trie/sync.go @@ -0,0 +1,186 @@ +package trie + +import ( + "bytes" + "sync" + "time" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" + 
"github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/storage" +) + +type trieSyncer struct { + trie *patriciaMerkleTrie + resolver dataRetriever.Resolver + interceptedNodes storage.Cacher + chRcvTrieNodes chan bool + waitTime time.Duration + + requestedHashes [][]byte + requestedHashesMutex sync.Mutex +} + +// NewTrieSyncer creates a new instance of trieSyncer +func NewTrieSyncer( + resolver dataRetriever.Resolver, + interceptedNodes storage.Cacher, + trie data.Trie, + waitTime time.Duration, +) (*trieSyncer, error) { + if check.IfNil(resolver) { + return nil, ErrNilResolver + } + if check.IfNil(interceptedNodes) { + return nil, data.ErrNilCacher + } + if check.IfNil(trie) { + return nil, ErrNilTrie + } + + pmt, ok := trie.(*patriciaMerkleTrie) + if !ok { + return nil, ErrWrongTypeAssertion + } + + return &trieSyncer{ + resolver: resolver, + interceptedNodes: interceptedNodes, + trie: pmt, + chRcvTrieNodes: make(chan bool), + requestedHashes: make([][]byte, 0), + waitTime: waitTime, + }, nil +} + +// StartSyncing completes the trie, asking for missing trie nodes on the network +func (ts *trieSyncer) StartSyncing(rootHash []byte) error { + if len(rootHash) == 0 { + return ErrInvalidHash + } + ts.interceptedNodes.RegisterHandler(ts.trieNodeIntercepted) + + currentNode, err := ts.getNode(rootHash) + if err != nil { + return err + } + + ts.trie.root = currentNode + err = ts.trie.root.loadChildren(ts) + if err != nil { + return err + } + + nextNodes, err := ts.trie.root.getChildren(ts.trie.Database()) + if err != nil { + return err + } + + for len(nextNodes) != 0 { + currentNode, err = ts.getNode(nextNodes[0].getHash()) + if err != nil { + return err + } + + nextNodes = nextNodes[1:] + + err = currentNode.loadChildren(ts) + if err != nil { + return err + } + + var children []node + children, err = currentNode.getChildren(ts.trie.Database()) + if err != nil { + return err + } + nextNodes = append(nextNodes, children...) + } + + err = ts.trie.Commit() + if err != nil { + return err + } + + return nil +} + +func (ts *trieSyncer) getNode(hash []byte) (node, error) { + n, ok := ts.interceptedNodes.Get(hash) + if ok { + return trieNode(n) + } + + err := ts.requestNode(hash) + if err != nil { + return nil, err + } + + n, _ = ts.interceptedNodes.Get(hash) + return trieNode(n) +} + +func trieNode(data interface{}) (node, error) { + n, ok := data.(*InterceptedTrieNode) + if !ok { + return nil, ErrWrongTypeAssertion + } + + return n.node, nil +} + +func (ts *trieSyncer) requestNode(hash []byte) error { + receivedRequestedHashTrigger := append(hash, hash...) + ts.requestedHashesMutex.Lock() + ts.requestedHashes = append(ts.requestedHashes, receivedRequestedHashTrigger) + ts.requestedHashesMutex.Unlock() + + err := ts.resolver.RequestDataFromHash(hash) + if err != nil { + return err + } + + return ts.waitForTrieNode() +} + +func (ts *trieSyncer) waitForTrieNode() error { + select { + case <-ts.chRcvTrieNodes: + return nil + case <-time.After(ts.waitTime): + return ErrTimeIsOut + } +} + +func (ts *trieSyncer) trieNodeIntercepted(hash []byte) { + ts.requestedHashesMutex.Lock() + + if hashInSlice(hash, ts.requestedHashes) { + ts.chRcvTrieNodes <- true + ts.removeRequestedHash(hash) + } + ts.requestedHashesMutex.Unlock() +} + +func (ts *trieSyncer) removeRequestedHash(hash []byte) { + for i := range ts.requestedHashes { + if bytes.Equal(ts.requestedHashes[i], hash) { + ts.requestedHashes = append(ts.requestedHashes[:i], ts.requestedHashes[i+1:]...) 
+ } + } +} + +func hashInSlice(hash []byte, hashes [][]byte) bool { + for _, h := range hashes { + if bytes.Equal(h, hash) { + return true + } + } + return false +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ts *trieSyncer) IsInterfaceNil() bool { + return ts == nil +} diff --git a/data/trie/sync_test.go b/data/trie/sync_test.go new file mode 100644 index 00000000000..66fe8d31dc3 --- /dev/null +++ b/data/trie/sync_test.go @@ -0,0 +1,84 @@ +package trie_test + +import ( + "io/ioutil" + "math/rand" + "strconv" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/mock" + "github.com/ElrondNetwork/elrond-go/data/trie" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/storage/lrucache" + "github.com/ElrondNetwork/elrond-go/storage/memorydb" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + "github.com/stretchr/testify/assert" +) + +func getInterceptedNodes(tr data.Trie, marshalizer marshal.Marshalizer, hasher hashing.Hasher) []*trie.InterceptedTrieNode { + nodes, _ := getEncodedTrieNodesAndHashes(tr) + + interceptedNodes := make([]*trie.InterceptedTrieNode, 0) + for i := range nodes { + node, _ := trie.NewInterceptedTrieNode(nodes[i], marshalizer, hasher) + interceptedNodes = append(interceptedNodes, node) + } + + return interceptedNodes +} + +func TestTrieSyncer_StartSyncing(t *testing.T) { + t.Parallel() + + db := mock.NewMemDbMock() + marshalizer := &mock.ProtobufMarshalizerMock{} + hasher := &mock.KeccakMock{} + tempDir, _ := ioutil.TempDir("", strconv.Itoa(rand.Intn(100000))) + cfg := &config.DBConfig{ + FilePath: tempDir, + Type: string(storageUnit.LvlDbSerial), + BatchDelaySeconds: 1, + MaxBatchSize: 1, + MaxOpenFiles: 10, + } + + evictionWaitingList, _ := mock.NewEvictionWaitingList(100, memorydb.New(), marshalizer) + trieStorage, _ := trie.NewTrieStorageManager(db, cfg, evictionWaitingList) + tr, _ := trie.NewTrie(trieStorage, marshalizer, hasher) + + syncTrie := initTrie() + interceptedNodesCacher, _ := lrucache.NewCache(100) + interceptedNodes := getInterceptedNodes(syncTrie, marshalizer, hasher) + nrNodesToSend := 2 + nodesIndex := 0 + nrRequests := 0 + expectedRequests := 3 + + resolver := &mock.TrieNodesResolverStub{ + RequestDataFromHashCalled: func(hash []byte) error { + requestedNode := interceptedNodes[nodesIndex] + for i := nodesIndex; i < nodesIndex+nrNodesToSend; i++ { + interceptedNodesCacher.Put(interceptedNodes[i].Hash(), interceptedNodes[i]) + } + + requestedNode.CreateEndOfProcessingTriggerNode() + interceptedNodesCacher.Put(requestedNode.Hash(), requestedNode) + nodesIndex += nrNodesToSend + nrRequests++ + + return nil + }, + } + + rootHash, _ := syncTrie.Root() + sync, _ := trie.NewTrieSyncer(resolver, interceptedNodesCacher, tr, 10*time.Second) + + _ = sync.StartSyncing(rootHash) + newTrieRootHash, _ := tr.Root() + assert.Equal(t, rootHash, newTrieRootHash) + assert.Equal(t, expectedRequests, nrRequests) +} diff --git a/data/trie/trieStorageManager.go b/data/trie/trieStorageManager.go new file mode 100644 index 00000000000..0270d46914d --- /dev/null +++ b/data/trie/trieStorageManager.go @@ -0,0 +1,397 @@ +package trie + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + "sync" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" + 
"github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/mock" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" +) + +// trieStorageManager manages all the storage operations of the trie (commit, snapshot, checkpoint, pruning) +type trieStorageManager struct { + db data.DBWriteCacher + pruningBuffer [][]byte + + snapshots []storage.Persister + snapshotId int + snapshotDbCfg *config.DBConfig + snapshotsBuffer snapshotsBuffer + + dbEvictionWaitingList data.DBRemoveCacher + storageOperationMutex sync.RWMutex +} + +// NewTrieStorageManager creates a new instance of trieStorageManager +func NewTrieStorageManager(db data.DBWriteCacher, snapshotDbCfg *config.DBConfig, ewl data.DBRemoveCacher) (*trieStorageManager, error) { + if check.IfNil(db) { + return nil, ErrNilDatabase + } + if check.IfNil(ewl) { + return nil, ErrNilEvictionWaitingList + } + if snapshotDbCfg == nil { + return nil, ErrNilSnapshotDbConfig + } + + snapshots, snapshotId, err := getSnapshotsAndSnapshotId(snapshotDbCfg) + if err != nil { + log.Debug("get snapshot", "error", err.Error()) + } + + return &trieStorageManager{ + db: db, + pruningBuffer: make([][]byte, 0), + snapshots: snapshots, + snapshotId: snapshotId, + snapshotDbCfg: snapshotDbCfg, + snapshotsBuffer: newSnapshotsQueue(), + dbEvictionWaitingList: ewl, + }, nil +} + +func getSnapshotsAndSnapshotId(snapshotDbCfg *config.DBConfig) ([]storage.Persister, int, error) { + snapshots := make([]storage.Persister, 0) + snapshotId := 0 + + if !directoryExists(snapshotDbCfg.FilePath) { + return snapshots, snapshotId, nil + } + + files, err := ioutil.ReadDir(snapshotDbCfg.FilePath) + if err != nil { + return snapshots, snapshotId, err + } + + for _, f := range files { + if !f.IsDir() { + continue + } + + snapshotName, err := strconv.Atoi(f.Name()) + if err != nil { + return snapshots, snapshotId, err + } + + db, err := storageUnit.NewDB( + storageUnit.DBType(snapshotDbCfg.Type), + path.Join(snapshotDbCfg.FilePath, f.Name()), + snapshotDbCfg.BatchDelaySeconds, + snapshotDbCfg.MaxBatchSize, + snapshotDbCfg.MaxOpenFiles, + ) + if err != nil { + return snapshots, snapshotId, err + } + + if snapshotName > snapshotId { + snapshotId = snapshotName + } + + snapshots = append(snapshots, db) + } + + if len(snapshots) != 0 { + snapshotId++ + } + + return snapshots, snapshotId, nil +} + +// Database returns the main database +func (tsm *trieStorageManager) Database() data.DBWriteCacher { + tsm.storageOperationMutex.Lock() + defer tsm.storageOperationMutex.Unlock() + + return tsm.db +} + +// SetDatabase sets the provided database as the main database +func (tsm *trieStorageManager) SetDatabase(db data.DBWriteCacher) { + tsm.storageOperationMutex.Lock() + tsm.db = db + tsm.storageOperationMutex.Unlock() +} + +// Clone returns a new instance of trieStorageManager +func (tsm *trieStorageManager) Clone() data.StorageManager { + tsm.storageOperationMutex.Lock() + defer tsm.storageOperationMutex.Unlock() + + return &trieStorageManager{ + db: tsm.db, + pruningBuffer: tsm.pruningBuffer, + snapshots: tsm.snapshots, + snapshotId: tsm.snapshotId, + snapshotDbCfg: tsm.snapshotDbCfg, + snapshotsBuffer: tsm.snapshotsBuffer.clone(), + dbEvictionWaitingList: tsm.dbEvictionWaitingList, + } +} + +// Prune removes the given hash from db +func (tsm *trieStorageManager) Prune(rootHash []byte) error { + tsm.storageOperationMutex.Lock() + 
defer tsm.storageOperationMutex.Unlock() + + log.Trace("trie storage manager prune", "root", rootHash) + if tsm.snapshotsBuffer.len() != 0 { + tsm.pruningBuffer = append(tsm.pruningBuffer, rootHash) + return nil + } + + err := tsm.removeFromDb(rootHash) + if err != nil { + return fmt.Errorf("trie storage manager prune error: %w, for root %v", err, core.ToB64(rootHash)) + } + + return nil +} + +// CancelPrune removes the given hash from the eviction waiting list +func (tsm *trieStorageManager) CancelPrune(rootHash []byte) { + tsm.storageOperationMutex.Lock() + defer tsm.storageOperationMutex.Unlock() + + log.Trace("trie storage manager cancel prune", "root", rootHash) + _, _ = tsm.dbEvictionWaitingList.Evict(rootHash) +} + +func (tsm *trieStorageManager) removeFromDb(hash []byte) error { + hashes, err := tsm.dbEvictionWaitingList.Evict(hash) + if err != nil { + return err + } + + for i := range hashes { + err = tsm.db.Remove(hashes[i]) + if err != nil { + return err + } + } + + return nil +} + +// MarkForEviction adds the given hashes in the eviction waiting list at the provided key +func (tsm *trieStorageManager) MarkForEviction(root []byte, hashes [][]byte) error { + tsm.storageOperationMutex.Lock() + defer tsm.storageOperationMutex.Unlock() + + log.Trace("trie storage manager: mark for eviction", "root", root) + + return tsm.dbEvictionWaitingList.Put(root, hashes) +} + +// GetDbThatContainsHash returns the database that contains the given hash +func (tsm *trieStorageManager) GetDbThatContainsHash(rootHash []byte) data.DBWriteCacher { + tsm.storageOperationMutex.Lock() + defer tsm.storageOperationMutex.Unlock() + + _, err := tsm.db.Get(rootHash) + + hashPresent := err == nil + if hashPresent { + return tsm.db + } + + for i := range tsm.snapshots { + _, err = tsm.snapshots[i].Get(rootHash) + + hashPresent = err == nil + if hashPresent { + return tsm.snapshots[i] + } + } + + return nil +} + +// TakeSnapshot creates a new snapshot, or if there is another snapshot or checkpoint in progress, +// it adds this snapshot in the queue. +func (tsm *trieStorageManager) TakeSnapshot(rootHash []byte, msh marshal.Marshalizer, hsh hashing.Hasher) { + tsm.storageOperationMutex.Lock() + defer tsm.storageOperationMutex.Unlock() + + tsm.snapshotsBuffer.add(rootHash, true) + if tsm.snapshotsBuffer.len() > 1 { + return + } + + go tsm.snapshot(msh, hsh) +} + +// SetCheckpoint creates a new checkpoint, or if there is another snapshot or checkpoint in progress, +// it adds this checkpoint in the queue. 
The checkpoint operation creates a new snapshot file +// only if there was no snapshot done prior to this +func (tsm *trieStorageManager) SetCheckpoint(rootHash []byte, msh marshal.Marshalizer, hsh hashing.Hasher) { + tsm.storageOperationMutex.Lock() + defer tsm.storageOperationMutex.Unlock() + + tsm.snapshotsBuffer.add(rootHash, false) + if tsm.snapshotsBuffer.len() > 1 { + return + } + + go tsm.snapshot(msh, hsh) +} + +func (tsm *trieStorageManager) snapshot(msh marshal.Marshalizer, hsh hashing.Hasher) { + var keys [][]byte + isSnapshotsBufferEmpty := false + for !isSnapshotsBufferEmpty { + tsm.storageOperationMutex.Lock() + + snapshot := tsm.snapshotsBuffer.getFirst() + tr, err := newSnapshotTrie(tsm.db, msh, hsh, snapshot.rootHash) + if err != nil { + log.Error("trie storage manager: newSnapshotTrie", "error", err.Error()) + return + } + db := tsm.getSnapshotDb(snapshot.newDb) + + tsm.storageOperationMutex.Unlock() + + err = tr.root.commit(true, 0, tsm.db, db) + if err != nil { + log.Error("trie storage manager: commit", "error", err.Error()) + return + } + + tsm.storageOperationMutex.Lock() + tsm.snapshotsBuffer.removeFirst() + isSnapshotsBufferEmpty = tsm.snapshotsBuffer.len() == 0 + if isSnapshotsBufferEmpty { + keys = tsm.pruningBuffer + tsm.pruningBuffer = make([][]byte, 0) + } + tsm.storageOperationMutex.Unlock() + } + + tsm.removeKeysFromDb(keys) +} + +func (tsm *trieStorageManager) removeKeysFromDb(keys [][]byte) { + for i := range keys { + tsm.storageOperationMutex.Lock() + err := tsm.removeFromDb(keys[i]) + if err != nil { + log.Error("trie storage manager: removeKeysFromDb", "error", err.Error()) + } + tsm.storageOperationMutex.Unlock() + } +} + +func (tsm *trieStorageManager) getSnapshotDb(newDb bool) data.DBWriteCacher { + createNewDb := newDb || len(tsm.snapshots) == 0 + if !createNewDb { + return tsm.snapshots[len(tsm.snapshots)-1] + } + + db, err := tsm.newSnapshotDb() + if err != nil { + log.Error("trie storage manager: getSnapshotDb", "error", err.Error()) + return nil + } + + if len(tsm.snapshots) > maxSnapshots { + tsm.removeSnapshot() + } + + return db +} + +func (tsm *trieStorageManager) removeSnapshot() { + dbUniqueId := strconv.Itoa(tsm.snapshotId - len(tsm.snapshots)) + + err := tsm.snapshots[0].Close() + if err != nil { + log.Error("trie storage manager: removeSnapshot", "error", err.Error()) + return + } + tsm.snapshots = tsm.snapshots[1:] + + removePath := path.Join(tsm.snapshotDbCfg.FilePath, dbUniqueId) + go removeDirectory(removePath) +} + +func removeDirectory(path string) { + err := os.RemoveAll(path) + if err != nil { + log.Error(err.Error()) + } +} + +func newSnapshotTrie( + db data.DBWriteCacher, + msh marshal.Marshalizer, + hsh hashing.Hasher, + rootHash []byte, +) (*patriciaMerkleTrie, error) { + newRoot, err := getNodeFromDBAndDecode(rootHash, db, msh, hsh) + if err != nil { + return nil, err + } + + trieStorage, err := NewTrieStorageManager(db, &config.DBConfig{}, &mock.EvictionWaitingList{}) + if err != nil { + return nil, err + } + + return &patriciaMerkleTrie{ + root: newRoot, + trieStorage: trieStorage, + marshalizer: msh, + hasher: hsh, + }, nil +} + +func (tsm *trieStorageManager) newSnapshotDb() (storage.Persister, error) { + snapshotPath := path.Join(tsm.snapshotDbCfg.FilePath, strconv.Itoa(tsm.snapshotId)) + for directoryExists(snapshotPath) { + tsm.snapshotId++ + snapshotPath = path.Join(tsm.snapshotDbCfg.FilePath, strconv.Itoa(tsm.snapshotId)) + } + + db, err := storageUnit.NewDB( + storageUnit.DBType(tsm.snapshotDbCfg.Type), + 
snapshotPath, + tsm.snapshotDbCfg.BatchDelaySeconds, + tsm.snapshotDbCfg.MaxBatchSize, + tsm.snapshotDbCfg.MaxOpenFiles, + ) + if err != nil { + return nil, err + } + + tsm.snapshotId++ + tsm.snapshots = append(tsm.snapshots, db) + + return db, nil +} + +func directoryExists(path string) bool { + _, err := os.Stat(path) + return !os.IsNotExist(err) +} + +// IsPruningEnabled returns true if the trie pruning is enabled +func (tsm *trieStorageManager) IsPruningEnabled() bool { + return true +} + +// IsInterfaceNil returns true if there is no value under the interface +func (tsm *trieStorageManager) IsInterfaceNil() bool { + return tsm == nil +} diff --git a/data/trie/trieStorageManagerWithoutPruning.go b/data/trie/trieStorageManagerWithoutPruning.go new file mode 100644 index 00000000000..bbefdc413b5 --- /dev/null +++ b/data/trie/trieStorageManagerWithoutPruning.go @@ -0,0 +1,66 @@ +package trie + +import ( + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" +) + +// trieStorageManagerWithoutPruning manages the storage operations of the trie, but does not prune old values +type trieStorageManagerWithoutPruning struct { + *trieStorageManager +} + +// NewTrieStorageManagerWithoutPruning creates a new instance of trieStorageManagerWithoutPruning +func NewTrieStorageManagerWithoutPruning(db data.DBWriteCacher) (*trieStorageManagerWithoutPruning, error) { + if check.IfNil(db) { + return nil, ErrNilDatabase + } + + return &trieStorageManagerWithoutPruning{&trieStorageManager{db: db}}, nil +} + +// TakeSnapshot does nothing if pruning is disabled +func (tsm *trieStorageManagerWithoutPruning) TakeSnapshot([]byte, marshal.Marshalizer, hashing.Hasher) { + log.Trace("trieStorageManagerWithoutPruning - TakeSnapshot:trie storage pruning is disabled") +} + +// SetCheckpoint does nothing if pruning is disabled +func (tsm *trieStorageManagerWithoutPruning) SetCheckpoint([]byte, marshal.Marshalizer, hashing.Hasher) { + log.Trace("trieStorageManagerWithoutPruning - SetCheckpoint:trie storage pruning is disabled") +} + +// Prune does nothing if pruning is disabled +func (tsm *trieStorageManagerWithoutPruning) Prune([]byte) error { + log.Trace("trieStorageManagerWithoutPruning - Prune:trie storage pruning is disabled") + return nil +} + +// CancelPrune does nothing if pruning is disabled +func (tsm *trieStorageManagerWithoutPruning) CancelPrune([]byte) { + log.Trace("trieStorageManagerWithoutPruning - CancelPrune:trie storage pruning is disabled") +} + +// MarkForEviction does nothing if pruning is disabled +func (tsm *trieStorageManagerWithoutPruning) MarkForEviction([]byte, [][]byte) error { + log.Trace("trieStorageManagerWithoutPruning - MarkForEviction:trie storage pruning is disabled") + return nil +} + +// Clone returns a new instance of trieStorageManagerWithoutPruning +func (tsm *trieStorageManagerWithoutPruning) Clone() data.StorageManager { + tsm.storageOperationMutex.Lock() + defer tsm.storageOperationMutex.Unlock() + + return &trieStorageManagerWithoutPruning{ + &trieStorageManager{ + db: tsm.db, + }, + } +} + +// IsPruningEnabled returns false if the trie pruning is disabled +func (tsm *trieStorageManagerWithoutPruning) IsPruningEnabled() bool { + return false +} diff --git a/data/trie/trieStorageManagerWithoutPruning_test.go b/data/trie/trieStorageManagerWithoutPruning_test.go new file mode 100644 index 00000000000..7bdfe7e3347 --- /dev/null +++ 
b/data/trie/trieStorageManagerWithoutPruning_test.go @@ -0,0 +1,79 @@ +package trie + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/data/mock" + "github.com/stretchr/testify/assert" +) + +func TestNewTrieStorageManagerWithoutPruningWithNilDb(t *testing.T) { + t.Parallel() + + ts, err := NewTrieStorageManagerWithoutPruning(nil) + assert.Nil(t, ts) + assert.Equal(t, ErrNilDatabase, err) +} + +func TestNewTrieStorageManagerWithoutPruning(t *testing.T) { + t.Parallel() + + ts, err := NewTrieStorageManagerWithoutPruning(mock.NewMemDbMock()) + assert.Nil(t, err) + assert.NotNil(t, ts) +} + +func TestTrieStorageManagerWithoutPruning_TakeSnapshotShouldNotPanic(t *testing.T) { + t.Parallel() + + ts, _ := NewTrieStorageManagerWithoutPruning(mock.NewMemDbMock()) + ts.TakeSnapshot([]byte{}, &mock.MarshalizerMock{}, mock.HasherMock{}) +} + +func TestTrieStorageManagerWithoutPruning_SetCheckpointShouldNotPanic(t *testing.T) { + t.Parallel() + + ts, _ := NewTrieStorageManagerWithoutPruning(mock.NewMemDbMock()) + ts.SetCheckpoint([]byte{}, &mock.MarshalizerMock{}, mock.HasherMock{}) +} + +func TestTrieStorageManagerWithoutPruning_PruneShouldNotPanic(t *testing.T) { + t.Parallel() + + ts, _ := NewTrieStorageManagerWithoutPruning(mock.NewMemDbMock()) + err := ts.Prune([]byte{}) + assert.Nil(t, err) +} + +func TestTrieStorageManagerWithoutPruning_CancelPruneShouldNotPanic(t *testing.T) { + t.Parallel() + + ts, _ := NewTrieStorageManagerWithoutPruning(mock.NewMemDbMock()) + ts.CancelPrune([]byte{}) +} + +func TestTrieStorageManagerWithoutPruning_MarkForEvictionShouldNotPanic(t *testing.T) { + t.Parallel() + + ts, _ := NewTrieStorageManagerWithoutPruning(mock.NewMemDbMock()) + err := ts.MarkForEviction([]byte{}, [][]byte{}) + assert.Nil(t, err) +} + +func TestTrieStorageManagerWithoutPruning_Clone(t *testing.T) { + t.Parallel() + + ts, _ := NewTrieStorageManagerWithoutPruning(mock.NewMemDbMock()) + + newTs := ts.Clone() + newTs, _ = newTs.(*trieStorageManagerWithoutPruning) + assert.True(t, ts != newTs) + assert.NotNil(t, newTs.Database()) +} + +func TestTrieStorageManagerWithoutPruning_IsPruningEnabled(t *testing.T) { + t.Parallel() + + ts, _ := NewTrieStorageManagerWithoutPruning(mock.NewMemDbMock()) + assert.False(t, ts.IsPruningEnabled()) +} diff --git a/data/trie/trieStorageManager_test.go b/data/trie/trieStorageManager_test.go new file mode 100644 index 00000000000..952ef95bbe1 --- /dev/null +++ b/data/trie/trieStorageManager_test.go @@ -0,0 +1,435 @@ +package trie + +import ( + "io/ioutil" + "os" + "path" + "strconv" + "sync" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/mock" + "github.com/ElrondNetwork/elrond-go/storage/memorydb" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + "github.com/stretchr/testify/assert" +) + +func TestNewTrieStorageManagerNilDb(t *testing.T) { + t.Parallel() + + ts, err := NewTrieStorageManager(nil, &config.DBConfig{}, &mock.EvictionWaitingList{}) + assert.Nil(t, ts) + assert.Equal(t, ErrNilDatabase, err) +} + +func TestNewTrieStorageManagerNilSnapshotDbConfig(t *testing.T) { + t.Parallel() + + ts, err := NewTrieStorageManager(mock.NewMemDbMock(), nil, &mock.EvictionWaitingList{}) + assert.Nil(t, ts) + assert.Equal(t, ErrNilSnapshotDbConfig, err) +} + +func TestNewTrieStorageManagerNilEwlAndPruningEnabled(t *testing.T) { + t.Parallel() + + ts, err := NewTrieStorageManager(mock.NewMemDbMock(), &config.DBConfig{}, nil) + assert.Nil(t, ts) + 
assert.Equal(t, ErrNilEvictionWaitingList, err) +} + +func TestNewTrieStorageManagerOkVals(t *testing.T) { + t.Parallel() + + ts, err := NewTrieStorageManager(mock.NewMemDbMock(), &config.DBConfig{}, &mock.EvictionWaitingList{}) + assert.Nil(t, err) + assert.NotNil(t, ts) +} + +func TestNewTrieStorageManagerWithExistingSnapshot(t *testing.T) { + t.Parallel() + + tempDir, _ := ioutil.TempDir("", "leveldb_temp") + cfg := &config.DBConfig{ + FilePath: tempDir, + Type: string(storageUnit.LvlDbSerial), + BatchDelaySeconds: 1, + MaxBatchSize: 1, + MaxOpenFiles: 10, + } + + db := mock.NewMemDbMock() + msh, hsh := getTestMarshAndHasher() + size := uint(100) + evictionWaitList, _ := mock.NewEvictionWaitingList(size, mock.NewMemDbMock(), msh) + trieStorage, _ := NewTrieStorageManager(db, cfg, evictionWaitList) + tr, _ := NewTrie(trieStorage, msh, hsh) + + _ = tr.Update([]byte("doe"), []byte("reindeer")) + _ = tr.Update([]byte("dog"), []byte("puppy")) + _ = tr.Update([]byte("dogglesworth"), []byte("cat")) + _ = tr.Commit() + rootHash, _ := tr.Root() + tr.TakeSnapshot(rootHash) + + for trieStorage.snapshotsBuffer.len() != 0 { + time.Sleep(time.Second) + } + _ = trieStorage.snapshots[0].Close() + + newTrieStorage, _ := NewTrieStorageManager(memorydb.New(), cfg, evictionWaitList) + snapshot := newTrieStorage.GetDbThatContainsHash(rootHash) + assert.NotNil(t, snapshot) + assert.Equal(t, 1, newTrieStorage.snapshotId) +} + +func TestTrieStorageManager_Clone(t *testing.T) { + t.Parallel() + + ts, _ := NewTrieStorageManager(mock.NewMemDbMock(), &config.DBConfig{}, &mock.EvictionWaitingList{}) + + newTs := ts.Clone() + newTs, _ = newTs.(*trieStorageManager) + assert.True(t, ts != newTs) +} + +func TestTrieDatabasePruning(t *testing.T) { + t.Parallel() + + db := mock.NewMemDbMock() + msh, hsh := getTestMarshAndHasher() + size := uint(1) + evictionWaitList, _ := mock.NewEvictionWaitingList(size, mock.NewMemDbMock(), msh) + trieStorage, _ := NewTrieStorageManager(db, &config.DBConfig{}, evictionWaitList) + + tr := &patriciaMerkleTrie{ + trieStorage: trieStorage, + oldHashes: make([][]byte, 0), + oldRoot: make([]byte, 0), + marshalizer: msh, + hasher: hsh, + } + + _ = tr.Update([]byte("doe"), []byte("reindeer")) + _ = tr.Update([]byte("dog"), []byte("puppy")) + _ = tr.Update([]byte("dogglesworth"), []byte("cat")) + _ = tr.Commit() + + key := []byte{6, 4, 6, 15, 6, 7, 16} + oldHashes := make([][]byte, 0) + n := tr.root + rootHash, _ := tr.Root() + oldHashes = append(oldHashes, rootHash) + + for i := 0; i < 3; i++ { + n, key, _ = n.getNext(key, db) + oldHashes = append(oldHashes, n.getHash()) + } + + _ = tr.Update([]byte("dog"), []byte("doee")) + _ = tr.Commit() + + err := tr.Prune(rootHash, data.OldRoot) + assert.Nil(t, err) + + for i := range oldHashes { + encNode, err := tr.Database().Get(oldHashes[i]) + assert.Nil(t, encNode) + assert.NotNil(t, err) + } +} + +func TestRecreateTrieFromSnapshotDb(t *testing.T) { + t.Parallel() + + tr, trieStorage, _ := newEmptyTrie() + _ = tr.Update([]byte("doe"), []byte("reindeer")) + _ = tr.Update([]byte("dog"), []byte("puppy")) + _ = tr.Update([]byte("dogglesworth"), []byte("cat")) + + _ = tr.Commit() + rootHash, _ := tr.Root() + tr.TakeSnapshot(rootHash) + + for trieStorage.snapshotsBuffer.len() != 0 { + time.Sleep(snapshotDelay) + } + + _ = tr.Update([]byte("doge"), []byte("doge")) + _ = tr.Commit() + _ = tr.Prune(rootHash, data.OldRoot) + + val, err := tr.Database().Get(rootHash) + assert.Nil(t, val) + assert.NotNil(t, err) + + newTrie, err := tr.Recreate(rootHash) + 
assert.Nil(t, err) + assert.NotNil(t, newTrie) +} + +func TestEachSnapshotCreatesOwnDatabase(t *testing.T) { + t.Parallel() + + testVals := []struct { + key []byte + value []byte + }{ + {[]byte("doe"), []byte("reindeer")}, + {[]byte("dog"), []byte("puppy")}, + {[]byte("dogglesworth"), []byte("cat")}, + } + + tr, trieStorage, _ := newEmptyTrie() + + for _, testVal := range testVals { + _ = tr.Update(testVal.key, testVal.value) + _ = tr.Commit() + tr.TakeSnapshot(tr.root.getHash()) + for trieStorage.snapshotsBuffer.len() != 0 { + time.Sleep(snapshotDelay) + } + + snapshotId := strconv.Itoa(trieStorage.snapshotId - 1) + snapshotPath := path.Join(trieStorage.snapshotDbCfg.FilePath, snapshotId) + f, _ := os.Stat(snapshotPath) + assert.True(t, f.IsDir()) + } + + assert.Equal(t, len(testVals), trieStorage.snapshotId) +} + +func TestDeleteOldSnapshots(t *testing.T) { + t.Parallel() + + testVals := []struct { + key []byte + value []byte + }{ + {[]byte("doe"), []byte("reindeer")}, + {[]byte("dog"), []byte("puppy")}, + {[]byte("dogglesworth"), []byte("cat")}, + {[]byte("horse"), []byte("mustang")}, + } + + tr, trieStorage, _ := newEmptyTrie() + + for _, testVal := range testVals { + _ = tr.Update(testVal.key, testVal.value) + _ = tr.Commit() + tr.TakeSnapshot(tr.root.getHash()) + for trieStorage.snapshotsBuffer.len() != 0 { + time.Sleep(snapshotDelay) + } + } + + snapshots, _ := ioutil.ReadDir(trieStorage.snapshotDbCfg.FilePath) + assert.Equal(t, 2, len(snapshots)) + assert.Equal(t, "2", snapshots[0].Name()) + assert.Equal(t, "3", snapshots[1].Name()) +} + +func TestPruningIsBufferedWhileSnapshoting(t *testing.T) { + t.Parallel() + + nrVals := 100000 + index := 0 + var rootHashes [][]byte + + db := mock.NewMemDbMock() + msh, hsh := getTestMarshAndHasher() + evictionWaitListSize := uint(100) + evictionWaitList, _ := mock.NewEvictionWaitingList(evictionWaitListSize, mock.NewMemDbMock(), msh) + + tempDir, _ := ioutil.TempDir("", "leveldb_temp") + cfg := &config.DBConfig{ + FilePath: tempDir, + Type: string(storageUnit.LvlDbSerial), + BatchDelaySeconds: 1, + MaxBatchSize: 40000, + MaxOpenFiles: 10, + } + trieStorage, _ := NewTrieStorageManager(db, cfg, evictionWaitList) + + tr := &patriciaMerkleTrie{ + trieStorage: trieStorage, + marshalizer: msh, + hasher: hsh, + } + + for i := 0; i < nrVals; i++ { + _ = tr.Update(tr.hasher.Compute(strconv.Itoa(index)), tr.hasher.Compute(strconv.Itoa(index))) + index++ + } + + _ = tr.Commit() + rootHash := tr.root.getHash() + rootHashes = append(rootHashes, rootHash) + tr.TakeSnapshot(rootHash) + + nrRounds := 10 + nrUpdates := 1000 + for i := 0; i < nrRounds; i++ { + for j := 0; j < nrUpdates; j++ { + _ = tr.Update(tr.hasher.Compute(strconv.Itoa(index)), tr.hasher.Compute(strconv.Itoa(index))) + index++ + } + _ = tr.Commit() + + previousRootHashIndex := len(rootHashes) - 1 + currentRootHash := tr.root.getHash() + + _ = tr.Prune(rootHashes[previousRootHashIndex], data.OldRoot) + _ = tr.Prune(currentRootHash, data.NewRoot) + rootHashes = append(rootHashes, currentRootHash) + } + numKeysToBeEvicted := 21 + assert.Equal(t, numKeysToBeEvicted, len(evictionWaitList.Cache)) + assert.NotEqual(t, 0, trieStorage.pruningBufferLength()) + + for trieStorage.snapshotsBuffer.len() != 0 { + time.Sleep(snapshotDelay) + } + time.Sleep(snapshotDelay) + + for i := range rootHashes { + val, err := tr.Database().Get(rootHashes[i]) + assert.Nil(t, val) + assert.NotNil(t, err) + } + + time.Sleep(batchDelay) + val, err := trieStorage.snapshots[0].Get(rootHash) + assert.NotNil(t, val) + 
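A brief illustrative note on the snapshot/checkpoint/prune interaction exercised by the surrounding test; the behaviour is taken from trieStorageManager above, and the root-hash variables are placeholders.

tr.TakeSnapshot(rootHash)   // always queued into a new snapshot database
tr.SetCheckpoint(otherRoot) // reuses the latest snapshot database, creating one only if none exists yet

// While a snapshot or checkpoint is still in the queue, Prune only buffers the
// request; the buffered roots are evicted once the snapshot goroutine drains
// the queue (see the pruningBuffer handling in trieStorageManager.snapshot).
_ = tr.Prune(oldRootHash, data.OldRoot)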
assert.Nil(t, err) +} + +func (tsm *trieStorageManager) pruningBufferLength() int { + tsm.storageOperationMutex.Lock() + defer tsm.storageOperationMutex.Unlock() + + return len(tsm.pruningBuffer) +} + +func TestTrieCheckpoint(t *testing.T) { + t.Parallel() + + tr, trieStorage, _ := newEmptyTrie() + _ = tr.Update([]byte("doe"), []byte("reindeer")) + _ = tr.Update([]byte("dog"), []byte("puppy")) + _ = tr.Update([]byte("dogglesworth"), []byte("cat")) + + _ = tr.Commit() + tr.TakeSnapshot(tr.root.getHash()) + + for trieStorage.snapshotsBuffer.len() != 0 { + time.Sleep(snapshotDelay) + } + + _ = tr.Update([]byte("doge"), []byte("reindeer")) + _ = tr.Commit() + + val, err := tr.Get([]byte("doge")) + assert.Nil(t, err) + assert.Equal(t, []byte("reindeer"), val) + + snapshotTrieStorage, _ := NewTrieStorageManager(trieStorage.snapshots[0], &config.DBConfig{}, &mock.EvictionWaitingList{}) + collapsedRoot, _ := tr.root.getCollapsed() + snapshotTrie := &patriciaMerkleTrie{ + root: collapsedRoot, + trieStorage: snapshotTrieStorage, + marshalizer: tr.marshalizer, + hasher: tr.hasher, + } + + val, err = snapshotTrie.Get([]byte("doge")) + assert.NotNil(t, err) + assert.Nil(t, val) + + tr.SetCheckpoint(tr.root.getHash()) + + for trieStorage.snapshotsBuffer.len() != 0 { + time.Sleep(snapshotDelay) + } + + val, err = snapshotTrie.Get([]byte("doge")) + assert.Nil(t, err) + assert.Equal(t, []byte("reindeer"), val) +} + +func TestTrieCheckpointWithNoSnapshotCreatesSnapshot(t *testing.T) { + t.Parallel() + + tr, trieStorage, _ := newEmptyTrie() + _ = tr.Update([]byte("doe"), []byte("reindeer")) + _ = tr.Update([]byte("dog"), []byte("puppy")) + _ = tr.Update([]byte("dogglesworth"), []byte("cat")) + + assert.Equal(t, 0, len(trieStorage.snapshots)) + + _ = tr.Commit() + tr.SetCheckpoint(tr.root.getHash()) + + for trieStorage.snapshotsBuffer.len() != 0 { + time.Sleep(snapshotDelay) + } + + assert.Equal(t, 1, len(trieStorage.snapshots)) +} + +func TestTrieSnapshottingAndCheckpointConcurrently(t *testing.T) { + t.Parallel() + + tr, trieStorage, _ := newEmptyTrie() + _ = tr.Update([]byte("doe"), []byte("reindeer")) + _ = tr.Update([]byte("dog"), []byte("puppy")) + _ = tr.Update([]byte("dogglesworth"), []byte("cat")) + _ = tr.Commit() + + tr.TakeSnapshot(tr.root.getHash()) + for trieStorage.snapshotsBuffer.len() != 0 { + time.Sleep(time.Second) + } + + numSnapshots := 10 + numCheckpoints := 10 + totalNumSnapshot := numSnapshots + 1 + + var snapshotWg sync.WaitGroup + var checkpointWg sync.WaitGroup + snapshotWg.Add(numSnapshots) + checkpointWg.Add(numCheckpoints) + + for i := 0; i < numSnapshots; i++ { + go func() { + rootHash, _ := tr.Root() + tr.TakeSnapshot(rootHash) + snapshotWg.Done() + }() + } + + for i := 0; i < numCheckpoints; i++ { + go func() { + rootHash, _ := tr.Root() + tr.SetCheckpoint(rootHash) + checkpointWg.Done() + }() + } + + snapshotWg.Wait() + checkpointWg.Wait() + + for trieStorage.snapshotsBuffer.len() != 0 { + time.Sleep(time.Second) + } + + assert.Equal(t, totalNumSnapshot, trieStorage.snapshotId) + + lastSnapshot := len(trieStorage.snapshots) - 1 + val, err := trieStorage.snapshots[lastSnapshot].Get(tr.root.getHash()) + assert.NotNil(t, val) + assert.Nil(t, err) +} diff --git a/dataRetriever/common.go b/dataRetriever/common.go new file mode 100644 index 00000000000..cf59dbce68f --- /dev/null +++ b/dataRetriever/common.go @@ -0,0 +1,26 @@ +package dataRetriever + +import "github.com/ElrondNetwork/elrond-go/process/factory" + +// SetEpochHandler sets the epoch handler to the metablock hdr 
resolver +func SetEpochHandlerToHdrResolver( + resolversContainer ResolversContainer, + epochHandler EpochHandler, +) error { + resolver, err := resolversContainer.Get(factory.MetachainBlocksTopic) + if err != nil { + return err + } + + hdrResolver, ok := resolver.(HeaderResolver) + if !ok { + return ErrWrongTypeInContainer + } + + err = hdrResolver.SetEpochHandler(epochHandler) + if err != nil { + return err + } + + return nil +} diff --git a/dataRetriever/dataPool/export_test.go b/dataRetriever/dataPool/export_test.go deleted file mode 100644 index 14db22e547d..00000000000 --- a/dataRetriever/dataPool/export_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package dataPool - -func (nspc *nonceSyncMapCacher) GetAddedHandlers() []func(nonce uint64, shardId uint32, value []byte) { - nspc.mutAddedDataHandlers.RLock() - defer nspc.mutAddedDataHandlers.RUnlock() - - handlers := make([]func(nonce uint64, shardId uint32, value []byte), len(nspc.addedDataHandlers)) - copy(handlers, nspc.addedDataHandlers) - - return handlers -} diff --git a/dataRetriever/dataPool/headersCache/errors.go b/dataRetriever/dataPool/headersCache/errors.go new file mode 100644 index 00000000000..cf482beb54e --- /dev/null +++ b/dataRetriever/dataPool/headersCache/errors.go @@ -0,0 +1,9 @@ +package headersCache + +import "github.com/pkg/errors" + +// ErrHeaderNotFound signals that the header that was searched was not found in the pool +var ErrHeaderNotFound = errors.New("cannot find header in cache") + +// ErrInvalidHeadersCacheParameter signals that parameters for headers cache are invalid +var ErrInvalidHeadersCacheParameter = errors.New("invalid headers cache parameters") diff --git a/dataRetriever/dataPool/headersCache/headersByHashMap.go b/dataRetriever/dataPool/headersCache/headersByHashMap.go new file mode 100644 index 00000000000..2603edde0c8 --- /dev/null +++ b/dataRetriever/dataPool/headersCache/headersByHashMap.go @@ -0,0 +1,30 @@ +package headersCache + +type headersByHashMap map[string]headerInfo + +func (hhm headersByHashMap) addElement(hash []byte, info headerInfo) bool { + if _, ok := hhm[string(hash)]; ok { + return true + } + + hhm[string(hash)] = info + return false +} + +func (hhm headersByHashMap) deleteElement(hash []byte) { + delete(hhm, string(hash)) +} + +func (hhm headersByHashMap) deleteBulk(hashes [][]byte) { + for _, hash := range hashes { + delete(hhm, string(hash)) + } +} + +func (hhm headersByHashMap) getElement(hash []byte) (headerInfo, bool) { + if element, ok := hhm[string(hash)]; ok { + return element, true + } + + return headerInfo{}, false +} diff --git a/dataRetriever/dataPool/headersCache/headersCache.go b/dataRetriever/dataPool/headersCache/headersCache.go new file mode 100644 index 00000000000..b1ded4a83da --- /dev/null +++ b/dataRetriever/dataPool/headersCache/headersCache.go @@ -0,0 +1,233 @@ +package headersCache + +import ( + "bytes" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" +) + +type headersCache struct { + headersNonceCache map[uint32]listOfHeadersByNonces + + headersByHash headersByHashMap + headersCounter numHeadersByShard + + numHeadersToRemove int + maxHeadersPerShard int +} + +func newHeadersCache(numMaxHeaderPerShard int, numHeadersToRemove int) *headersCache { + return &headersCache{ + headersNonceCache: make(map[uint32]listOfHeadersByNonces), + headersCounter: make(numHeadersByShard), + headersByHash: make(headersByHashMap), + numHeadersToRemove: numHeadersToRemove, + 
maxHeadersPerShard: numMaxHeaderPerShard, + } +} + +func (cache *headersCache) addHeader(headerHash []byte, header data.HeaderHandler) bool { + if check.IfNil(header) || len(headerHash) == 0 { + return false + } + + headerShardId := header.GetShardID() + headerNonce := header.GetNonce() + + cache.tryToDoEviction(headerShardId) + + headerInfo := headerInfo{headerNonce: headerNonce, headerShardId: headerShardId} + added := cache.headersByHash.addElement(headerHash, headerInfo) + if added { + return false + } + + shard := cache.getShardMap(headerShardId) + shard.appendHeaderToList(headerHash, header) + + cache.headersCounter.increment(headerShardId) + + return true + +} + +//tryToDoEviction will check if pool is full and if it is will do eviction +func (cache *headersCache) tryToDoEviction(shardId uint32) { + numHeaders := cache.getNumHeaders(shardId) + if int(numHeaders) >= cache.maxHeadersPerShard { + cache.lruEviction(shardId) + } +} + +func (cache *headersCache) lruEviction(shardId uint32) { + shard, ok := cache.headersNonceCache[shardId] + if !ok { + return + } + + nonces := shard.getNoncesSortedByTimestamp() + + numHashes := 0 + maxItemsToRemove := core.MinInt(cache.numHeadersToRemove, len(nonces)) + for i := 0; i < maxItemsToRemove; i++ { + numHashes += cache.removeHeaderByNonceAndShardId(nonces[i], shardId) + + if numHashes >= maxItemsToRemove { + break + } + } +} + +func (cache *headersCache) getShardMap(shardId uint32) listOfHeadersByNonces { + if _, ok := cache.headersNonceCache[shardId]; !ok { + cache.headersNonceCache[shardId] = make(listOfHeadersByNonces) + } + + return cache.headersNonceCache[shardId] +} + +func (cache *headersCache) getNumHeaders(shardId uint32) int64 { + return cache.headersCounter.getCount(shardId) +} + +func (cache *headersCache) removeHeaderByNonceAndShardId(headerNonce uint64, shardId uint32) int { + shard, ok := cache.headersNonceCache[shardId] + if !ok { + return 0 + } + + headers, ok := shard.getHeadersByNonce(headerNonce) + if !ok { + return 0 + } + headersHashes := headers.getHashes() + + //remove items from nonce map + shard.removeListOfHeaders(headerNonce) + //remove elements from hashes map + cache.headersByHash.deleteBulk(headersHashes) + + cache.headersCounter.decrement(shardId, len(headersHashes)) + + return len(headersHashes) +} + +func (cache *headersCache) removeHeaderByHash(hash []byte) { + if len(hash) == 0 { + return + } + + info, ok := cache.headersByHash.getElement(hash) + if !ok { + return + } + + cache.removeHeaderFromNonceMap(info, hash) + cache.headersByHash.deleteElement(hash) +} + +// removeHeaderFromNonceMap will remove a header from headerWithTimestamp +// when a header is removed by hash we need to remove also header from the map where is stored with nonce +func (cache *headersCache) removeHeaderFromNonceMap(headerInfo headerInfo, headerHash []byte) { + shard, ok := cache.headersNonceCache[headerInfo.headerShardId] + if !ok { + return + } + + headers, ok := shard.getHeadersByNonce(headerInfo.headerNonce) + if !ok { + return + } + + for index, header := range headers.items { + if !bytes.Equal(header.headerHash, headerHash) { + continue + } + + headers.removeHeader(index) + cache.headersCounter.decrement(headerInfo.headerShardId, 1) + + if headers.isEmpty() { + shard.removeListOfHeaders(headerInfo.headerNonce) + return + } + + shard.setListOfHeaders(headerInfo.headerNonce, headers) + return + } +} + +func (cache *headersCache) getHeaderByHash(hash []byte) (data.HeaderHandler, error) { + info, ok := 
cache.headersByHash.getElement(hash) + if !ok { + return nil, ErrHeaderNotFound + } + + shard, ok := cache.headersNonceCache[info.headerShardId] + if !ok { + return nil, ErrHeaderNotFound + } + + headers := shard.getListOfHeaders(info.headerNonce) + if headers.isEmpty() { + return nil, ErrHeaderNotFound + } + + headers.timestamp = time.Now() + shard.setListOfHeaders(info.headerNonce, headers) + + if header, ok := headers.findHeaderByHash(hash); ok { + return header, nil + } + + return nil, ErrHeaderNotFound +} + +func (cache *headersCache) getHeadersByNonceAndShardId(headerNonce uint64, shardId uint32) ([]headerDetails, bool) { + shard, ok := cache.headersNonceCache[shardId] + if !ok { + return nil, false + } + + headersList, ok := shard.getHeadersByNonce(headerNonce) + if !ok { + return nil, false + } + + return headersList.items, true +} + +func (cache *headersCache) getHeadersAndHashesByNonceAndShardId(nonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, bool) { + headersList, ok := cache.getHeadersByNonceAndShardId(nonce, shardId) + if !ok || len(headersList) == 0 { + return nil, nil, false + } + + headers := make([]data.HeaderHandler, 0, len(headersList)) + hashes := make([][]byte, 0, len(headersList)) + for _, headerDetails := range headersList { + headers = append(headers, headerDetails.header) + hashes = append(hashes, headerDetails.headerHash) + } + + return headers, hashes, true +} + +func (cache *headersCache) keys(shardId uint32) []uint64 { + shardMap := cache.getShardMap(shardId) + + return shardMap.keys() +} + +func (cache *headersCache) totalHeaders() int { + return cache.headersCounter.totalHeaders() +} + +func (cache *headersCache) clear() { + cache.headersNonceCache = make(map[uint32]listOfHeadersByNonces) + cache.headersCounter = make(numHeadersByShard) + cache.headersByHash = make(headersByHashMap) +} diff --git a/dataRetriever/dataPool/headersCache/headersCounter.go b/dataRetriever/dataPool/headersCache/headersCounter.go new file mode 100644 index 00000000000..9b1b6b7e795 --- /dev/null +++ b/dataRetriever/dataPool/headersCache/headersCounter.go @@ -0,0 +1,38 @@ +package headersCache + +type numHeadersByShard map[uint32]uint64 + +func (nhs numHeadersByShard) increment(shardId uint32) { + if _, ok := nhs[shardId]; !ok { + nhs[shardId] = 0 + } + + nhs[shardId]++ +} + +func (nhs numHeadersByShard) decrement(shardId uint32, value int) { + if _, ok := nhs[shardId]; !ok { + return + } + + nhs[shardId] -= uint64(value) +} + +func (nhs numHeadersByShard) getCount(shardId uint32) int64 { + numShardHeaders, ok := nhs[shardId] + if !ok { + return 0 + } + + return int64(numShardHeaders) +} + +func (nhs numHeadersByShard) totalHeaders() int { + total := 0 + + for _, value := range nhs { + total += int(value) + } + + return total +} diff --git a/dataRetriever/dataPool/headersCache/headersPool.go b/dataRetriever/dataPool/headersCache/headersPool.go new file mode 100644 index 00000000000..de2d2e70cd9 --- /dev/null +++ b/dataRetriever/dataPool/headersCache/headersPool.go @@ -0,0 +1,168 @@ +package headersCache + +import ( + "fmt" + "sync" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/logger" +) + +var log = logger.GetOrCreate("dataRetriever/headersCache") + +type headersPool struct { + cache *headersCache + mutAddedDataHandlers sync.RWMutex + mutHeadersPool sync.RWMutex + addedDataHandlers []func(headerHandler data.HeaderHandler, headerHash []byte) +} + +// NewHeadersPool will create a new 
items cacher +func NewHeadersPool(hdrsPoolConfig config.HeadersPoolConfig) (*headersPool, error) { + err := checkHeadersPoolConfig(hdrsPoolConfig) + if err != nil { + return nil, err + } + + headersCache := newHeadersCache(hdrsPoolConfig.MaxHeadersPerShard, hdrsPoolConfig.NumElementsToRemoveOnEviction) + + return &headersPool{ + cache: headersCache, + mutAddedDataHandlers: sync.RWMutex{}, + mutHeadersPool: sync.RWMutex{}, + addedDataHandlers: make([]func(headerHandler data.HeaderHandler, headerHash []byte), 0), + }, nil +} + +func checkHeadersPoolConfig(hdrsPoolConfig config.HeadersPoolConfig) error { + maxHdrsPerShard := hdrsPoolConfig.MaxHeadersPerShard + numElementsToRemove := hdrsPoolConfig.NumElementsToRemoveOnEviction + + if maxHdrsPerShard <= 0 { + return fmt.Errorf("%w, maxHdrsPerShard should be greater than 0", ErrInvalidHeadersCacheParameter) + } + if numElementsToRemove <= 0 { + return fmt.Errorf("%w, numElementsToRemove should be greater than 0", ErrInvalidHeadersCacheParameter) + } + + if maxHdrsPerShard < numElementsToRemove { + return fmt.Errorf("%w, maxHdrsPerShard should be greater than numElementsToRemove", ErrInvalidHeadersCacheParameter) + } + + return nil +} + +// AddHeader is used to add a header in pool +func (pool *headersPool) AddHeader(headerHash []byte, header data.HeaderHandler) { + pool.mutHeadersPool.Lock() + defer pool.mutHeadersPool.Unlock() + + added := pool.cache.addHeader(headerHash, header) + + if added { + pool.callAddedDataHandlers(header, headerHash) + } +} + +func (pool *headersPool) callAddedDataHandlers(headerHandler data.HeaderHandler, headerHash []byte) { + pool.mutAddedDataHandlers.RLock() + for _, handler := range pool.addedDataHandlers { + go handler(headerHandler, headerHash) + } + pool.mutAddedDataHandlers.RUnlock() +} + +// RemoveHeaderByHash will remove a header with a specific hash from pool +func (pool *headersPool) RemoveHeaderByHash(headerHash []byte) { + pool.mutHeadersPool.Lock() + defer pool.mutHeadersPool.Unlock() + + pool.cache.removeHeaderByHash(headerHash) +} + +// RemoveHeaderByNonceAndShardId will remove a header with a nonce and shard id from pool +func (pool *headersPool) RemoveHeaderByNonceAndShardId(hdrNonce uint64, shardId uint32) { + pool.mutHeadersPool.Lock() + defer pool.mutHeadersPool.Unlock() + + _ = pool.cache.removeHeaderByNonceAndShardId(hdrNonce, shardId) +} + +// GetHeadersByNonceAndShardId will return a list of items from pool +func (pool *headersPool) GetHeadersByNonceAndShardId(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) { + pool.mutHeadersPool.Lock() + defer pool.mutHeadersPool.Unlock() + + headers, hashes, ok := pool.cache.getHeadersAndHashesByNonceAndShardId(hdrNonce, shardId) + if !ok { + return nil, nil, ErrHeaderNotFound + } + + return headers, hashes, nil +} + +// GetHeaderByHash will return a header handler from pool with a specific hash +func (pool *headersPool) GetHeaderByHash(hash []byte) (data.HeaderHandler, error) { + pool.mutHeadersPool.Lock() + defer pool.mutHeadersPool.Unlock() + + return pool.cache.getHeaderByHash(hash) +} + +// GetNumHeaders will return how many header are in pool for a specific shard +func (pool *headersPool) GetNumHeaders(shardId uint32) int { + pool.mutHeadersPool.RLock() + defer pool.mutHeadersPool.RUnlock() + + return int(pool.cache.getNumHeaders(shardId)) +} + +// Clear will clear items pool +func (pool *headersPool) Clear() { + pool.mutHeadersPool.Lock() + defer pool.mutHeadersPool.Unlock() + + pool.cache.clear() +} + +// RegisterHandler 
registers a new handler to be called when a new data is added +func (pool *headersPool) RegisterHandler(handler func(headerHandler data.HeaderHandler, headerHash []byte)) { + if handler == nil { + log.Error("attempt to register a nil handler to a cacher object") + return + } + + pool.mutAddedDataHandlers.Lock() + pool.addedDataHandlers = append(pool.addedDataHandlers, handler) + pool.mutAddedDataHandlers.Unlock() +} + +// Nonces will return a slice of all items nonce that are in pool +func (pool *headersPool) Nonces(shardId uint32) []uint64 { + pool.mutHeadersPool.RLock() + defer pool.mutHeadersPool.RUnlock() + + return pool.cache.keys(shardId) +} + +// Len will return how many items are in pool +func (pool *headersPool) Len() int { + pool.mutHeadersPool.RLock() + defer pool.mutHeadersPool.RUnlock() + + return pool.cache.totalHeaders() +} + +// MaxSize will return how many header can be added in a pool ( per shard) +func (pool *headersPool) MaxSize() int { + pool.mutHeadersPool.RLock() + defer pool.mutHeadersPool.RUnlock() + + return pool.cache.maxHeadersPerShard +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pool *headersPool) IsInterfaceNil() bool { + return pool == nil +} diff --git a/dataRetriever/dataPool/headersCache/headersPool_test.go b/dataRetriever/dataPool/headersCache/headersPool_test.go new file mode 100644 index 00000000000..83997d03bc1 --- /dev/null +++ b/dataRetriever/dataPool/headersCache/headersPool_test.go @@ -0,0 +1,626 @@ +package headersCache_test + +import ( + "fmt" + "sort" + "sync" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool/headersCache" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewHeadersCacher_AddHeadersInCache(t *testing.T) { + t.Parallel() + + headersCacher, _ := headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + MaxHeadersPerShard: 1000, + NumElementsToRemoveOnEviction: 100}, + ) + + nonce := uint64(1) + shardId := uint32(0) + + headerHash1 := []byte("hash1") + headerHash2 := []byte("hash2") + testHdr1 := &block.Header{Nonce: nonce, ShardId: shardId} + testHdr2 := &block.Header{Nonce: nonce, ShardId: shardId, Round: 100} + + headersCacher.AddHeader(headerHash1, testHdr1) + headersCacher.AddHeader(headerHash2, testHdr2) + + header, err := headersCacher.GetHeaderByHash(headerHash1) + require.Nil(t, err) + require.Equal(t, testHdr1, header) + + header, err = headersCacher.GetHeaderByHash(headerHash2) + require.Nil(t, err) + require.Equal(t, testHdr2, header) + + expectedHeaders := []data.HeaderHandler{testHdr1, testHdr2} + headers, _, err := headersCacher.GetHeadersByNonceAndShardId(nonce, shardId) + require.Nil(t, err) + require.Equal(t, expectedHeaders, headers) +} + +func Test_RemoveHeaderByHash(t *testing.T) { + t.Parallel() + + headersCacher, _ := headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + MaxHeadersPerShard: 1000, + NumElementsToRemoveOnEviction: 100}, + ) + + nonce := uint64(1) + shardId := uint32(0) + + headerHash1 := []byte("hash1") + headerHash2 := []byte("hash2") + testHdr1 := &block.Header{Nonce: nonce, ShardId: shardId} + testHdr2 := &block.Header{Nonce: nonce, ShardId: shardId, Round: 100} + + headersCacher.AddHeader(headerHash1, testHdr1) + headersCacher.AddHeader(headerHash2, testHdr2) + + 
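For orientation, here is a minimal stand-alone sketch (not taken from the patch itself) of the basic lifecycle of the pool added in headersPool.go above: build it from a config.HeadersPoolConfig, add a header, then read it back by hash and by (nonce, shard). Identifiers and import paths come from the diff; the hash and header values are illustrative.

package main

import (
	"fmt"

	"github.com/ElrondNetwork/elrond-go/config"
	"github.com/ElrondNetwork/elrond-go/data/block"
	"github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool/headersCache"
)

func main() {
	// both limits must be positive and MaxHeadersPerShard must not be smaller than
	// NumElementsToRemoveOnEviction, otherwise ErrInvalidHeadersCacheParameter is returned
	pool, err := headersCache.NewHeadersPool(config.HeadersPoolConfig{
		MaxHeadersPerShard:            1000,
		NumElementsToRemoveOnEviction: 100,
	})
	if err != nil {
		panic(err)
	}

	hash := []byte("hash1")
	pool.AddHeader(hash, &block.Header{Nonce: 7, ShardId: 0})

	hdr, err := pool.GetHeaderByHash(hash) // returns ErrHeaderNotFound for an unknown hash
	if err == nil {
		fmt.Println(hdr.GetNonce()) // 7
	}

	headers, hashes, _ := pool.GetHeadersByNonceAndShardId(7, 0)
	fmt.Println(len(headers), len(hashes)) // 1 1
}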
headersCacher.RemoveHeaderByHash(headerHash1) + header, err := headersCacher.GetHeaderByHash(headerHash1) + require.Nil(t, header) + require.Equal(t, headersCache.ErrHeaderNotFound, err) + + headersCacher.RemoveHeaderByHash(headerHash2) + header, err = headersCacher.GetHeaderByHash(headerHash2) + require.Nil(t, header) + require.Equal(t, headersCache.ErrHeaderNotFound, err) +} + +func TestHeadersCacher_AddHeadersInCacheAndRemoveByNonceAndShardId(t *testing.T) { + t.Parallel() + + headersCacher, _ := headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + MaxHeadersPerShard: 1000, + NumElementsToRemoveOnEviction: 100}, + ) + + nonce := uint64(1) + shardId := uint32(0) + + headerHash1 := []byte("hash1") + headerHash2 := []byte("hash2") + testHdr1 := &block.Header{Nonce: nonce, ShardId: shardId} + testHdr2 := &block.Header{Nonce: nonce, ShardId: shardId, Round: 100} + + headersCacher.AddHeader(headerHash1, testHdr1) + headersCacher.AddHeader(headerHash2, testHdr2) + + headersCacher.RemoveHeaderByNonceAndShardId(nonce, shardId) + header, err := headersCacher.GetHeaderByHash(headerHash1) + require.Nil(t, header) + require.Equal(t, headersCache.ErrHeaderNotFound, err) + + header, err = headersCacher.GetHeaderByHash(headerHash2) + require.Nil(t, header) + require.Equal(t, headersCache.ErrHeaderNotFound, err) +} + +func TestHeadersCacher_Eviction(t *testing.T) { + t.Parallel() + + numHeadersToGenerate := 1001 + headers, headersHashes := createASliceOfHeaders(numHeadersToGenerate, 0) + headersCacher, _ := headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + MaxHeadersPerShard: 900, + NumElementsToRemoveOnEviction: 100}, + ) + + for i := 0; i < numHeadersToGenerate; i++ { + headersCacher.AddHeader(headersHashes[i], &headers[i]) + } + + // Cache will do eviction 2 times, in items cache will be 801 items + require.Equal(t, 801, headersCacher.GetNumHeaders(0)) + + for i := 200; i < numHeadersToGenerate; i++ { + header, err := headersCacher.GetHeaderByHash(headersHashes[i]) + require.Nil(t, err) + require.Equal(t, &headers[i], header) + } +} + +func TestHeadersCacher_ConcurrentRequests_NoEviction(t *testing.T) { + t.Parallel() + + numHeadersToGenerate := 50 + + headers, headersHashes := createASliceOfHeaders(numHeadersToGenerate, 0) + headersCacher, _ := headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + MaxHeadersPerShard: numHeadersToGenerate + 1, + NumElementsToRemoveOnEviction: 10}, + ) + + var waitgroup sync.WaitGroup + for i := 0; i < numHeadersToGenerate; i++ { + waitgroup.Add(1) + go func(index int) { + headersCacher.AddHeader(headersHashes[index], &headers[index]) + header, err := headersCacher.GetHeaderByHash(headersHashes[index]) + + assert.Nil(t, err) + assert.Equal(t, &headers[index], header) + waitgroup.Done() + }(i) + } + waitgroup.Wait() +} + +func TestHeadersCacher_ConcurrentRequests_WithEviction(t *testing.T) { + shardId := uint32(0) + cacheSize := 2 + numHeadersToGenerate := 50 + + headers, headersHashes := createASliceOfHeaders(numHeadersToGenerate, shardId) + headersCacher, _ := headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + MaxHeadersPerShard: cacheSize, + NumElementsToRemoveOnEviction: 1}, + ) + + var waitgroup sync.WaitGroup + for i := 0; i < numHeadersToGenerate; i++ { + waitgroup.Add(1) + go func(index int) { + headersCacher.AddHeader(headersHashes[index], &headers[index]) + waitgroup.Done() + }(i) + } + waitgroup.Wait() + // cache size after all eviction is finish should be 2 + require.Equal(t, 2, headersCacher.GetNumHeaders(shardId)) + + 
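The 801 asserted in TestHeadersCacher_Eviction above follows from the order of operations in addHeader: tryToDoEviction runs before the new header is stored, and with one header per nonce (as in that test) each eviction removes exactly NumElementsToRemoveOnEviction hashes. A stand-alone trace of that arithmetic, illustrative only:

package main

import "fmt"

func main() {
	// settings from TestHeadersCacher_Eviction: MaxHeadersPerShard = 900,
	// NumElementsToRemoveOnEviction = 100, 1001 headers with distinct nonces;
	// eviction is checked before every insert, exactly as tryToDoEviction does
	count, evictions := 0, 0
	for i := 0; i < 1001; i++ {
		if count >= 900 {
			count -= 100 // one header per nonce, so precisely 100 hashes are dropped
			evictions++
		}
		count++
	}
	fmt.Println(count, evictions) // 801 2 -- eviction fires at the 901st and the 1001st insert
}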
numHeadersToGenerate = 3 + headers, headersHashes = createASliceOfHeaders(3, shardId) + for i := 0; i < numHeadersToGenerate; i++ { + headersCacher.AddHeader(headersHashes[i], &headers[i]) + time.Sleep(time.Microsecond) + } + + require.Equal(t, 2, headersCacher.GetNumHeaders(shardId)) + header1, err := headersCacher.GetHeaderByHash(headersHashes[1]) + require.Nil(t, err) + require.Equal(t, &headers[1], header1) + + header2, err := headersCacher.GetHeaderByHash(headersHashes[2]) + require.Nil(t, err) + require.Equal(t, &headers[2], header2) +} + +func TestHeadersCacher_AddHeadersWithSameNonceShouldBeRemovedAtEviction(t *testing.T) { + t.Parallel() + + shardId := uint32(0) + cacheSize := 2 + + hash1, hash2, hash3 := []byte("hash1"), []byte("hash2"), []byte("hash3") + header1, header2, header3 := &block.Header{Nonce: 0}, &block.Header{Nonce: 0}, &block.Header{Nonce: 1} + + headersCacher, _ := headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + MaxHeadersPerShard: cacheSize, + NumElementsToRemoveOnEviction: 1}, + ) + headersCacher.AddHeader(hash1, header1) + headersCacher.AddHeader(hash2, header2) + headersCacher.AddHeader(hash3, header3) + + require.Equal(t, 1, headersCacher.GetNumHeaders(shardId)) + + header, err := headersCacher.GetHeaderByHash(hash3) + require.Nil(t, err) + require.Equal(t, header3, header) +} + +func TestHeadersCacher_AddALotOfHeadersAndCheckEviction(t *testing.T) { + t.Parallel() + + cacheSize := 100 + numHeaders := 200 + shardId := uint32(0) + headers, headersHash := createASliceOfHeaders(numHeaders, shardId) + + headersCacher, _ := headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + MaxHeadersPerShard: cacheSize, + NumElementsToRemoveOnEviction: 50}, + ) + + var waitgroup sync.WaitGroup + for i := 0; i < numHeaders; i++ { + waitgroup.Add(1) + go func(index int) { + headersCacher.AddHeader(headersHash[index], &headers[index]) + waitgroup.Done() + }(i) + } + + waitgroup.Wait() + assert.Equal(t, 100, headersCacher.GetNumHeaders(shardId)) +} + +func TestHeadersCacher_BigCacheALotOfHeaders(t *testing.T) { + t.Parallel() + + cacheSize := 100000 + numHeadersToGenerate := cacheSize + shardId := uint32(0) + + headers, headersHash := createASliceOfHeaders(numHeadersToGenerate, shardId) + headersCacher, _ := headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + MaxHeadersPerShard: cacheSize, + NumElementsToRemoveOnEviction: 50}, + ) + + start := time.Now() + for i := 0; i < numHeadersToGenerate; i++ { + headersCacher.AddHeader(headersHash[i], &headers[i]) + } + elapsed := time.Since(start) + fmt.Printf("insert %d took %s \n", numHeadersToGenerate, elapsed) + + start = time.Now() + header, _ := headersCacher.GetHeaderByHash(headersHash[100]) + elapsed = time.Since(start) + require.Equal(t, &headers[100], header) + fmt.Printf("get header by hash took %s \n", elapsed) + + start = time.Now() + d, _, _ := headersCacher.GetHeadersByNonceAndShardId(uint64(100), shardId) + elapsed = time.Since(start) + fmt.Printf("get header by shard id and nonce took %s \n", elapsed) + require.Equal(t, &headers[100], d[0]) + + start = time.Now() + headersCacher.RemoveHeaderByNonceAndShardId(uint64(500), shardId) + elapsed = time.Since(start) + fmt.Printf("remove header by shard id and nonce took %s \n", elapsed) + + header, err := headersCacher.GetHeaderByHash(headersHash[500]) + require.Nil(t, header) + require.Error(t, headersCache.ErrHeaderNotFound, err) + + start = time.Now() + headersCacher.RemoveHeaderByHash(headersHash[2012]) + elapsed = time.Since(start) + fmt.Printf("remove 
header by hash took %s \n", elapsed) + + header, err = headersCacher.GetHeaderByHash(headersHash[2012]) + require.Nil(t, header) + require.Error(t, headersCache.ErrHeaderNotFound, err) +} + +func TestHeadersCacher_AddHeadersWithDifferentShardIdOnMultipleGoroutines(t *testing.T) { + t.Parallel() + + cacheSize := 51 + numHdrsToGenerate := 50 + + headersShard0, hashesShad0 := createASliceOfHeadersNonce0(numHdrsToGenerate, 0) + headersShard1, hashesShad1 := createASliceOfHeaders(numHdrsToGenerate, 1) + headersShard2, hashesShad2 := createASliceOfHeaders(numHdrsToGenerate, 2) + numElemsToRemove := 25 + headersCacher, _ := headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + MaxHeadersPerShard: cacheSize, + NumElementsToRemoveOnEviction: numElemsToRemove}, + ) + + var waitgroup sync.WaitGroup + start := time.Now() + for i := 0; i < numHdrsToGenerate; i++ { + waitgroup.Add(5) + go func(index int) { + headersCacher.AddHeader(hashesShad0[index], &headersShard0[index]) + waitgroup.Done() + }(i) + + go func(index int) { + headersCacher.AddHeader(hashesShad1[index], &headersShard1[index]) + go func(index int) { + headersCacher.RemoveHeaderByHash(hashesShad1[index]) + waitgroup.Done() + }(index) + waitgroup.Done() + }(i) + + go func(index int) { + headersCacher.AddHeader(hashesShad2[index], &headersShard2[index]) + go func(index int) { + headersCacher.RemoveHeaderByHash(hashesShad2[index]) + waitgroup.Done() + }(index) + waitgroup.Done() + }(i) + } + + waitgroup.Wait() + + for i := 0; i < numHdrsToGenerate; i++ { + waitgroup.Add(1) + go func(index int) { + headersCacher.RemoveHeaderByHash(hashesShad0[index]) + waitgroup.Done() + }(i) + } + waitgroup.Wait() + + elapsed := time.Since(start) + fmt.Printf("time need to add %d in cache %s \n", numHdrsToGenerate, elapsed) + + require.Equal(t, 0, headersCacher.GetNumHeaders(0)) + require.Equal(t, 0, headersCacher.GetNumHeaders(1)) + require.Equal(t, 0, headersCacher.GetNumHeaders(2)) +} + +func TestHeadersCacher_TestEvictionRemoveCorrectHeader(t *testing.T) { + t.Parallel() + + shardId := uint32(0) + cacheSize := 2 + numHeadersToGenerate := 3 + + headers, headersHashes := createASliceOfHeaders(numHeadersToGenerate, shardId) + headersCacher, _ := headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + MaxHeadersPerShard: cacheSize, + NumElementsToRemoveOnEviction: 1}, + ) + + for i := 0; i < numHeadersToGenerate-1; i++ { + headersCacher.AddHeader(headersHashes[i], &headers[i]) + time.Sleep(time.Microsecond) + } + + header, err := headersCacher.GetHeaderByHash(headersHashes[0]) + require.Nil(t, err) + require.Equal(t, &headers[0], header) + + headersCacher.AddHeader(headersHashes[2], &headers[2]) + + header, err = headersCacher.GetHeaderByHash(headersHashes[0]) + require.Nil(t, err) + require.Equal(t, &headers[0], header) + + header, err = headersCacher.GetHeaderByHash(headersHashes[2]) + require.Nil(t, err) + require.Equal(t, &headers[2], header) + + header, err = headersCacher.GetHeaderByHash(headersHashes[1]) + require.Nil(t, header) + require.Equal(t, headersCache.ErrHeaderNotFound, err) +} + +func TestHeadersCacher_TestEvictionRemoveCorrectHeader2(t *testing.T) { + t.Parallel() + + shardId := uint32(0) + cacheSize := 99 + numHeadersToGenerate := 100 + + headers, headersHashes := createASliceOfHeaders(numHeadersToGenerate, shardId) + headersCacher, _ := headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + MaxHeadersPerShard: cacheSize, + NumElementsToRemoveOnEviction: 1}, + ) + + for i := 0; i < numHeadersToGenerate-1; i++ { + 
headersCacher.AddHeader(headersHashes[i], &headers[i]) + time.Sleep(time.Microsecond) + } + + headersFromCache, _, err := headersCacher.GetHeadersByNonceAndShardId(0, shardId) + require.Nil(t, err) + require.Equal(t, &headers[0], headersFromCache[0]) + + headersCacher.AddHeader(headersHashes[numHeadersToGenerate-1], &headers[numHeadersToGenerate-1]) + + header, err := headersCacher.GetHeaderByHash(headersHashes[0]) + require.Nil(t, err) + require.Equal(t, &headers[0], header) + + header, err = headersCacher.GetHeaderByHash(headersHashes[1]) + require.Nil(t, header) + require.Equal(t, headersCache.ErrHeaderNotFound, err) + + for i := 2; i <= cacheSize; i++ { + header, err := headersCacher.GetHeaderByHash(headersHashes[i]) + require.Nil(t, err) + require.Equal(t, &headers[i], header) + } +} + +func TestHeadersPool_AddHeadersMultipleShards(t *testing.T) { + t.Parallel() + + shardId0, shardId1, shardId2, shardMeta := uint32(0), uint32(1), uint32(1), sharding.MetachainShardId + cacheSize := 50 + numHeadersToGenerate := 49 + numElemsToRemove := 25 + + headersShard0, headersHashesShard0 := createASliceOfHeaders(numHeadersToGenerate, shardId0) + headersShard1, headersHashesShard1 := createASliceOfHeaders(numHeadersToGenerate, shardId1) + headersShard2, headersHashesShard2 := createASliceOfHeaders(numHeadersToGenerate, shardId2) + headersShardMeta, headersHashesShardMeta := createASliceOfHeaders(numHeadersToGenerate, shardMeta) + + headersCacher, _ := headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + MaxHeadersPerShard: cacheSize, + NumElementsToRemoveOnEviction: numElemsToRemove}, + ) + + var waitgroup sync.WaitGroup + start := time.Now() + for i := 0; i < numHeadersToGenerate; i++ { + waitgroup.Add(4) + go func(index int) { + headersCacher.AddHeader(headersHashesShard0[index], &headersShard0[index]) + waitgroup.Done() + }(i) + go func(index int) { + headersCacher.AddHeader(headersHashesShard1[index], &headersShard1[index]) + waitgroup.Done() + }(i) + go func(index int) { + headersCacher.AddHeader(headersHashesShard2[index], &headersShard2[index]) + waitgroup.Done() + }(i) + go func(index int) { + headersCacher.AddHeader(headersHashesShardMeta[index], &headersShardMeta[index]) + waitgroup.Done() + }(i) + } + + waitgroup.Wait() + + elapsed := time.Since(start) + fmt.Printf("add items in cache took %s \n", elapsed) + + for i := 0; i < numHeadersToGenerate; i++ { + waitgroup.Add(4) + go func(index int) { + header, err := headersCacher.GetHeaderByHash(headersHashesShard0[index]) + assert.Nil(t, err) + assert.Equal(t, &headersShard0[index], header) + waitgroup.Done() + }(i) + go func(index int) { + header, err := headersCacher.GetHeaderByHash(headersHashesShard1[index]) + assert.Nil(t, err) + assert.Equal(t, &headersShard1[index], header) + waitgroup.Done() + }(i) + go func(index int) { + header, err := headersCacher.GetHeaderByHash(headersHashesShard2[index]) + assert.Nil(t, err) + assert.Equal(t, &headersShard2[index], header) + waitgroup.Done() + }(i) + go func(index int) { + header, err := headersCacher.GetHeaderByHash(headersHashesShardMeta[index]) + assert.Nil(t, err) + assert.Equal(t, &headersShardMeta[index], header) + waitgroup.Done() + }(i) + } + + waitgroup.Wait() + + elapsed = time.Since(start) + fmt.Printf("get items by hash took %s \n", elapsed) +} + +func TestHeadersPool_Nonces(t *testing.T) { + t.Parallel() + + shardId := uint32(0) + numHeadersToGenerate := 1000 + cacheSize := 1000 + numHeadersToRemove := 100 + headersShard0, headersHashesShard0 := 
createASliceOfHeaders(numHeadersToGenerate, shardId) + + headersCacher, _ := headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + MaxHeadersPerShard: cacheSize, + NumElementsToRemoveOnEviction: numHeadersToRemove}, + ) + + for i := 0; i < numHeadersToGenerate; i++ { + headersCacher.AddHeader(headersHashesShard0[i], &headersShard0[i]) + } + + require.Equal(t, cacheSize, headersCacher.MaxSize()) + require.Equal(t, numHeadersToGenerate, headersCacher.Len()) + + // get all keys and sort then to can verify if are ok + nonces := headersCacher.Nonces(shardId) + sort.Slice(nonces, func(i, j int) bool { + return nonces[i] < nonces[j] + }) + + for i := uint64(0); i < uint64(len(nonces)); i++ { + require.Equal(t, i, nonces[i]) + } +} + +func TestHeadersPool_RegisterHandler(t *testing.T) { + t.Parallel() + + wasCalled := false + headersCacher, _ := headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + MaxHeadersPerShard: 1000, + NumElementsToRemoveOnEviction: 100}, + ) + wg := sync.WaitGroup{} + wg.Add(1) + handler := func(header data.HeaderHandler, hash []byte) { + wasCalled = true + wg.Done() + } + headersCacher.RegisterHandler(handler) + header, hash := createASliceOfHeaders(1, 0) + headersCacher.AddHeader(hash[0], &header[0]) + + wg.Wait() + + assert.True(t, wasCalled) +} + +func TestHeadersPool_Clear(t *testing.T) { + t.Parallel() + + headersCacher, _ := headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + MaxHeadersPerShard: 1000, + NumElementsToRemoveOnEviction: 10}, + ) + header, hash := createASliceOfHeaders(1, 0) + headersCacher.AddHeader(hash[0], &header[0]) + + headersCacher.Clear() + + require.Equal(t, 0, headersCacher.Len()) + require.Equal(t, 0, headersCacher.GetNumHeaders(0)) +} + +func createASliceOfHeaders(numHeaders int, shardId uint32) ([]block.Header, [][]byte) { + headers := make([]block.Header, 0) + headersHashes := make([][]byte, 0) + for i := 0; i < numHeaders; i++ { + headers = append(headers, block.Header{Nonce: uint64(i), ShardId: shardId}) + headersHashes = append(headersHashes, []byte(fmt.Sprintf("%d_%d", shardId, i))) + } + + return headers, headersHashes +} + +func createASliceOfHeadersNonce0(numHeaders int, shardId uint32) ([]block.Header, [][]byte) { + headers := make([]block.Header, 0) + headersHashes := make([][]byte, 0) + for i := 0; i < numHeaders; i++ { + headers = append(headers, block.Header{Nonce: 0, ShardId: shardId}) + headersHashes = append(headersHashes, []byte(fmt.Sprintf("%d_%d", shardId, i))) + } + + return headers, headersHashes +} diff --git a/dataRetriever/dataPool/headersCache/listOfHeadersByNonces.go b/dataRetriever/dataPool/headersCache/listOfHeadersByNonces.go new file mode 100644 index 00000000000..2348e5463e3 --- /dev/null +++ b/dataRetriever/dataPool/headersCache/listOfHeadersByNonces.go @@ -0,0 +1,85 @@ +package headersCache + +import ( + "sort" + "time" + + "github.com/ElrondNetwork/elrond-go/data" +) + +type listOfHeadersByNonces map[uint64]timestampedListOfHeaders + +func (hMap listOfHeadersByNonces) getListOfHeaders(nonce uint64) timestampedListOfHeaders { + element, ok := hMap[nonce] + if !ok { + return timestampedListOfHeaders{ + items: make([]headerDetails, 0), + timestamp: time.Now(), + } + } + + return element +} + +func (hMap listOfHeadersByNonces) appendHeaderToList(headerHash []byte, header data.HeaderHandler) { + headerNonce := header.GetNonce() + headersWithTimestamp := hMap.getListOfHeaders(headerNonce) + + headerDetails := headerDetails{ + headerHash: headerHash, + header: header, + } + 
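One detail worth noting from callAddedDataHandlers above: every handler registered through RegisterHandler is invoked on its own goroutine, which is why TestHeadersPool_RegisterHandler synchronizes with a WaitGroup. A minimal stand-alone caller-side sketch of subscribing to additions (values illustrative):

package main

import (
	"fmt"
	"sync"

	"github.com/ElrondNetwork/elrond-go/config"
	"github.com/ElrondNetwork/elrond-go/data"
	"github.com/ElrondNetwork/elrond-go/data/block"
	"github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool/headersCache"
)

func main() {
	pool, _ := headersCache.NewHeadersPool(config.HeadersPoolConfig{
		MaxHeadersPerShard:            1000,
		NumElementsToRemoveOnEviction: 100,
	})

	var wg sync.WaitGroup
	wg.Add(1)
	pool.RegisterHandler(func(hdr data.HeaderHandler, hash []byte) {
		// runs on a separate goroutine whenever a header is actually added
		// (a duplicate hash does not trigger the callback)
		fmt.Printf("added header nonce %d (hash %s)\n", hdr.GetNonce(), hash)
		wg.Done()
	})

	pool.AddHeader([]byte("hash1"), &block.Header{Nonce: 1, ShardId: 0})
	wg.Wait() // without this, main could exit before the callback fires
}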
headersWithTimestamp.items = append(headersWithTimestamp.items, headerDetails) + hMap.setListOfHeaders(headerNonce, headersWithTimestamp) +} + +func (hMap listOfHeadersByNonces) setListOfHeaders(nonce uint64, element timestampedListOfHeaders) { + hMap[nonce] = element +} + +func (hMap listOfHeadersByNonces) removeListOfHeaders(nonce uint64) { + delete(hMap, nonce) +} + +func (hMap listOfHeadersByNonces) getNoncesSortedByTimestamp() []uint64 { + noncesTimestampsSlice := make([]nonceTimestamp, 0) + + for key, value := range hMap { + noncesTimestampsSlice = append(noncesTimestampsSlice, nonceTimestamp{nonce: key, timestamp: value.timestamp}) + } + + sort.Slice(noncesTimestampsSlice, func(i, j int) bool { + return noncesTimestampsSlice[j].timestamp.After(noncesTimestampsSlice[i].timestamp) + }) + + nonces := make([]uint64, 0) + for _, element := range noncesTimestampsSlice { + nonces = append(nonces, element.nonce) + } + + return nonces +} + +// getHeadersByNonce will return a list of items and update timestamp +func (hMap listOfHeadersByNonces) getHeadersByNonce(hdrNonce uint64) (timestampedListOfHeaders, bool) { + hdrsWithTimestamp := hMap.getListOfHeaders(hdrNonce) + if hdrsWithTimestamp.isEmpty() { + return timestampedListOfHeaders{}, false + } + + //update timestamp + hdrsWithTimestamp.timestamp = time.Now() + hMap.setListOfHeaders(hdrNonce, hdrsWithTimestamp) + + return hdrsWithTimestamp, true +} + +func (hMap listOfHeadersByNonces) keys() []uint64 { + nonces := make([]uint64, 0, len(hMap)) + + for key := range hMap { + nonces = append(nonces, key) + } + + return nonces +} diff --git a/dataRetriever/dataPool/headersCache/structs.go b/dataRetriever/dataPool/headersCache/structs.go new file mode 100644 index 00000000000..de61c3c4d8b --- /dev/null +++ b/dataRetriever/dataPool/headersCache/structs.go @@ -0,0 +1,54 @@ +package headersCache + +import ( + "bytes" + "github.com/ElrondNetwork/elrond-go/data" + "time" +) + +// this structure is only used for sorting +type nonceTimestamp struct { + nonce uint64 + timestamp time.Time +} + +type headerDetails struct { + headerHash []byte + header data.HeaderHandler +} + +type timestampedListOfHeaders struct { + items []headerDetails + timestamp time.Time +} + +func (listOfHeaders *timestampedListOfHeaders) isEmpty() bool { + return len(listOfHeaders.items) == 0 +} + +func (listOfHeaders *timestampedListOfHeaders) removeHeader(index int) { + listOfHeaders.items = append(listOfHeaders.items[:index], listOfHeaders.items[index+1:]...) 
+} + +func (listOfHeaders *timestampedListOfHeaders) getHashes() [][]byte { + hashes := make([][]byte, 0) + for _, header := range listOfHeaders.items { + hashes = append(hashes, header.headerHash) + } + + return hashes +} + +func (listOfHeaders *timestampedListOfHeaders) findHeaderByHash(hash []byte) (data.HeaderHandler, bool) { + for _, header := range listOfHeaders.items { + if bytes.Equal(hash, header.headerHash) { + return header.header, true + } + } + return nil, false +} + +type headerInfo struct { + headerNonce uint64 + headerShardId uint32 +} diff --git a/dataRetriever/dataPool/metaDataPool.go b/dataRetriever/dataPool/metaDataPool.go index 5aecfefb79f..1743b75df77 100644 --- a/dataRetriever/dataPool/metaDataPool.go +++ b/dataRetriever/dataPool/metaDataPool.go @@ -1,15 +1,15 @@ package dataPool import ( + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/storage" ) type metaDataPool struct { - metaBlocks storage.Cacher miniBlocks storage.Cacher - shardHeaders storage.Cacher - headersNonces dataRetriever.Uint64SyncMapCacher + trieNodes storage.Cacher + headers dataRetriever.HeadersPool transactions dataRetriever.ShardedDataCacherNotifier unsignedTransactions dataRetriever.ShardedDataCacherNotifier currBlockTxs dataRetriever.TransactionCacher @@ -17,42 +17,36 @@ type metaDataPool struct { // NewMetaDataPool creates a data pools holder object func NewMetaDataPool( - metaBlocks storage.Cacher, miniBlocks storage.Cacher, - shardHeaders storage.Cacher, - headersNonces dataRetriever.Uint64SyncMapCacher, + trieNodes storage.Cacher, + headersCacher dataRetriever.HeadersPool, transactions dataRetriever.ShardedDataCacherNotifier, unsignedTransactions dataRetriever.ShardedDataCacherNotifier, currBlockTxs dataRetriever.TransactionCacher, ) (*metaDataPool, error) { - - if metaBlocks == nil || metaBlocks.IsInterfaceNil() { - return nil, dataRetriever.ErrNilMetaBlockPool - } - if miniBlocks == nil || miniBlocks.IsInterfaceNil() { + if check.IfNil(miniBlocks) { return nil, dataRetriever.ErrNilMiniBlockHashesPool } - if shardHeaders == nil || shardHeaders.IsInterfaceNil() { + if check.IfNil(headersCacher) { return nil, dataRetriever.ErrNilShardHeaderPool } - if headersNonces == nil || headersNonces.IsInterfaceNil() { - return nil, dataRetriever.ErrNilMetaBlockNoncesPool - } - if transactions == nil || transactions.IsInterfaceNil() { + if check.IfNil(transactions) { return nil, dataRetriever.ErrNilTxDataPool } - if unsignedTransactions == nil || unsignedTransactions.IsInterfaceNil() { + if check.IfNil(unsignedTransactions) { return nil, dataRetriever.ErrNilUnsignedTransactionPool } - if currBlockTxs == nil || currBlockTxs.IsInterfaceNil() { + if check.IfNil(currBlockTxs) { return nil, dataRetriever.ErrNilCurrBlockTxs } + if trieNodes == nil || trieNodes.IsInterfaceNil() { + return nil, dataRetriever.ErrNilTrieNodesPool + } return &metaDataPool{ - metaBlocks: metaBlocks, miniBlocks: miniBlocks, - shardHeaders: shardHeaders, - headersNonces: headersNonces, + trieNodes: trieNodes, + headers: headersCacher, transactions: transactions, unsignedTransactions: unsignedTransactions, currBlockTxs: currBlockTxs, @@ -64,25 +58,14 @@ func (mdp *metaDataPool) CurrentBlockTxs() dataRetriever.TransactionCacher { return mdp.currBlockTxs } -// MetaBlocks returns the holder for meta blocks -func (mdp *metaDataPool) MetaBlocks() storage.Cacher { - return mdp.metaBlocks -} - // MiniBlocks returns the holder for meta mini block hashes 
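In the metaDataPool hunk above, the separate MetaBlocks()/ShardHeaders() caches and the HeadersNonces() nonce-hash map are folded into a single Headers() accessor returning a dataRetriever.HeadersPool, which indexes headers both by hash and by (nonce, shardId). Assuming that interface exposes the same methods as the headersPool implementation earlier in this patch, a hypothetical caller-side helper (name and wiring illustrative) could look like this:

package example // hypothetical package, for illustration only

import (
	"github.com/ElrondNetwork/elrond-go/data"
	"github.com/ElrondNetwork/elrond-go/dataRetriever"
)

// storeAndFetch writes a header into the consolidated pool and reads it back,
// first by hash, then by (nonce, shardId) -- both lookups reach the same entry.
func storeAndFetch(headers dataRetriever.HeadersPool, hash []byte, hdr data.HeaderHandler) (data.HeaderHandler, error) {
	headers.AddHeader(hash, hdr)

	byHash, err := headers.GetHeaderByHash(hash)
	if err != nil {
		return nil, err
	}

	_, _, err = headers.GetHeadersByNonceAndShardId(hdr.GetNonce(), hdr.GetShardID())
	if err != nil {
		return nil, err
	}

	return byHash, nil
}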
func (mdp *metaDataPool) MiniBlocks() storage.Cacher { return mdp.miniBlocks } -// ShardHeaders returns the holder for shard headers -func (mdp *metaDataPool) ShardHeaders() storage.Cacher { - return mdp.shardHeaders -} - -// HeadersNonces returns the holder nonce-block hash pairs. It will hold both shard headers nonce-hash pairs -// also metachain header nonce-hash pairs -func (mdp *metaDataPool) HeadersNonces() dataRetriever.Uint64SyncMapCacher { - return mdp.headersNonces +// Headers returns the holder for shard/meta headers +func (mdp *metaDataPool) Headers() dataRetriever.HeadersPool { + return mdp.headers } // Transactions returns the holder for transactions which interact with the metachain @@ -95,6 +78,11 @@ func (mdp *metaDataPool) UnsignedTransactions() dataRetriever.ShardedDataCacherN return mdp.unsignedTransactions } +// TrieNodes returns the holder for trie nodes +func (mdp *metaDataPool) TrieNodes() storage.Cacher { + return mdp.trieNodes +} + // IsInterfaceNil returns true if there is no value under the interface func (mdp *metaDataPool) IsInterfaceNil() bool { if mdp == nil { diff --git a/dataRetriever/dataPool/metaDataPool_test.go b/dataRetriever/dataPool/metaDataPool_test.go index 02c31b125be..4f0293711ff 100644 --- a/dataRetriever/dataPool/metaDataPool_test.go +++ b/dataRetriever/dataPool/metaDataPool_test.go @@ -17,25 +17,7 @@ func TestNewMetaDataPool_NilMetaBlockShouldErr(t *testing.T) { tdp, err := dataPool.NewMetaDataPool( nil, &mock.CacherStub{}, - &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, - &mock.ShardedDataStub{}, - &mock.ShardedDataStub{}, - &mock.TxForCurrentBlockStub{}, - ) - - assert.Equal(t, dataRetriever.ErrNilMetaBlockPool, err) - assert.Nil(t, tdp) -} - -func TestNewMetaDataPool_NilMiniBlockHeaderHashesShouldErr(t *testing.T) { - t.Parallel() - - tdp, err := dataPool.NewMetaDataPool( - &mock.CacherStub{}, - nil, - &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, + &mock.HeadersCacherStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.TxForCurrentBlockStub{}, @@ -52,7 +34,6 @@ func TestNewMetaDataPool_NilShardHeaderShouldErr(t *testing.T) { &mock.CacherStub{}, &mock.CacherStub{}, nil, - &mock.Uint64SyncMapCacherStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.TxForCurrentBlockStub{}, @@ -62,20 +43,19 @@ func TestNewMetaDataPool_NilShardHeaderShouldErr(t *testing.T) { assert.Nil(t, tdp) } -func TestNewMetaDataPool_NilHeaderNoncesShouldErr(t *testing.T) { +func TestNewMetaDataPool_NilTrieNodesShouldErr(t *testing.T) { t.Parallel() tdp, err := dataPool.NewMetaDataPool( - &mock.CacherStub{}, - &mock.CacherStub{}, &mock.CacherStub{}, nil, + &mock.HeadersCacherStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.TxForCurrentBlockStub{}, ) - assert.Equal(t, dataRetriever.ErrNilMetaBlockNoncesPool, err) + assert.Equal(t, dataRetriever.ErrNilTrieNodesPool, err) assert.Nil(t, tdp) } @@ -85,8 +65,7 @@ func TestNewMetaDataPool_NilTxPoolShouldErr(t *testing.T) { tdp, err := dataPool.NewMetaDataPool( &mock.CacherStub{}, &mock.CacherStub{}, - &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, + &mock.HeadersCacherStub{}, nil, &mock.ShardedDataStub{}, &mock.TxForCurrentBlockStub{}, @@ -102,8 +81,7 @@ func TestNewMetaDataPool_NilUnsingedPoolNoncesShouldErr(t *testing.T) { tdp, err := dataPool.NewMetaDataPool( &mock.CacherStub{}, &mock.CacherStub{}, - &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, + &mock.HeadersCacherStub{}, &mock.ShardedDataStub{}, nil, &mock.TxForCurrentBlockStub{}, @@ -116,18 +94,16 @@ 
func TestNewMetaDataPool_NilUnsingedPoolNoncesShouldErr(t *testing.T) { func TestNewMetaDataPool_ConfigOk(t *testing.T) { t.Parallel() - metaBlocks := &mock.CacherStub{} - shardHeaders := &mock.CacherStub{} + headers := &mock.HeadersCacherStub{} miniBlocks := &mock.CacherStub{} - hdrsNonces := &mock.Uint64SyncMapCacherStub{} transactions := &mock.ShardedDataStub{} unsigned := &mock.ShardedDataStub{} + trieNodes := &mock.CacherStub{} tdp, err := dataPool.NewMetaDataPool( - metaBlocks, miniBlocks, - shardHeaders, - hdrsNonces, + trieNodes, + headers, transactions, unsigned, &mock.TxForCurrentBlockStub{}, @@ -135,10 +111,8 @@ func TestNewMetaDataPool_ConfigOk(t *testing.T) { assert.Nil(t, err) //pointer checking - assert.True(t, metaBlocks == tdp.MetaBlocks()) - assert.True(t, shardHeaders == tdp.ShardHeaders()) + assert.True(t, headers == tdp.Headers()) assert.True(t, miniBlocks == tdp.MiniBlocks()) - assert.True(t, hdrsNonces == tdp.HeadersNonces()) assert.True(t, transactions == tdp.Transactions()) assert.True(t, unsigned == tdp.UnsignedTransactions()) } diff --git a/dataRetriever/dataPool/nonceSyncMapCacher.go b/dataRetriever/dataPool/nonceSyncMapCacher.go deleted file mode 100644 index e57be7ade9d..00000000000 --- a/dataRetriever/dataPool/nonceSyncMapCacher.go +++ /dev/null @@ -1,184 +0,0 @@ -package dataPool - -import ( - "bytes" - "sync" - - "github.com/ElrondNetwork/elrond-go/data/typeConverters" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/logger" - "github.com/ElrondNetwork/elrond-go/storage" -) - -var log = logger.GetOrCreate("dataretriever/datapool") - -type nonceSyncMapCacher struct { - mergeMut sync.Mutex - cacher storage.Cacher - nonceConverter typeConverters.Uint64ByteSliceConverter - mutAddedDataHandlers sync.RWMutex - addedDataHandlers []func(nonce uint64, shardId uint32, value []byte) -} - -// NewNonceSyncMapCacher returns a new instance of nonceSyncMapCacher -func NewNonceSyncMapCacher( - cacher storage.Cacher, - nonceConverter typeConverters.Uint64ByteSliceConverter, -) (*nonceSyncMapCacher, error) { - - if cacher == nil || cacher.IsInterfaceNil() { - return nil, dataRetriever.ErrNilCacher - } - if nonceConverter == nil || nonceConverter.IsInterfaceNil() { - return nil, dataRetriever.ErrNilUint64ByteSliceConverter - } - - return &nonceSyncMapCacher{ - cacher: cacher, - nonceConverter: nonceConverter, - addedDataHandlers: make([]func(nonce uint64, shardId uint32, value []byte), 0), - }, nil -} - -// Clear is used to completely clear the cache. -func (nspc *nonceSyncMapCacher) Clear() { - nspc.cacher.Clear() -} - -// Get looks up for a nonce in cache. -func (nspc *nonceSyncMapCacher) Get(nonce uint64) (dataRetriever.ShardIdHashMap, bool) { - val, ok := nspc.cacher.Peek(nspc.nonceConverter.ToByteSlice(nonce)) - if !ok { - return nil, ok - } - - syncMap, ok := val.(*ShardIdHashSyncMap) - if !ok { - return nil, ok - } - - return syncMap, ok -} - -// Merge will append existing values from src map. If the keys already exists in the existing map, their values -// will be overwritten. If the existing map is nil, a new map will created and all values from src map will be copied. 
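Tying the pieces together, NewMetaDataPool now takes six dependencies in the order used by TestNewMetaDataPool_ConfigOk above: miniBlocks, trieNodes, the headers pool, transactions, unsigned transactions and the current-block transaction cacher. A sketch of that wiring, with the non-header caches supplied by the caller since their construction is outside this patch (helper name illustrative):

package example // hypothetical package, for illustration only

import (
	"github.com/ElrondNetwork/elrond-go/config"
	"github.com/ElrondNetwork/elrond-go/dataRetriever"
	"github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool"
	"github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool/headersCache"
	"github.com/ElrondNetwork/elrond-go/storage"
)

// newMetaPool builds the headers pool introduced in this patch and feeds it to
// NewMetaDataPool using the new argument order; the constructed pool is discarded
// here to keep the sketch free of assumptions about the holder interface name.
func newMetaPool(
	miniBlocks storage.Cacher,
	trieNodes storage.Cacher,
	txs dataRetriever.ShardedDataCacherNotifier,
	unsigned dataRetriever.ShardedDataCacherNotifier,
	currBlockTxs dataRetriever.TransactionCacher,
) error {
	headers, err := headersCache.NewHeadersPool(config.HeadersPoolConfig{
		MaxHeadersPerShard:            1000,
		NumElementsToRemoveOnEviction: 100,
	})
	if err != nil {
		return err
	}

	// order: miniBlocks, trieNodes, headers, transactions, unsigned, currBlockTxs
	_, err = dataPool.NewMetaDataPool(miniBlocks, trieNodes, headers, txs, unsigned, currBlockTxs)
	return err
}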
-func (nspc *nonceSyncMapCacher) Merge(nonce uint64, src dataRetriever.ShardIdHashMap) { - if src == nil || src.IsInterfaceNil() { - return - } - - nspc.mergeMut.Lock() - defer nspc.mergeMut.Unlock() - - shouldRewriteMap := false - val, ok := nspc.cacher.Peek(nspc.nonceConverter.ToByteSlice(nonce)) - if !ok { - val = &ShardIdHashSyncMap{} - shouldRewriteMap = true - } - - syncMap := val.(*ShardIdHashSyncMap) - - if shouldRewriteMap { - nspc.cacher.Put(nspc.nonceConverter.ToByteSlice(nonce), syncMap) - } - - nspc.copySyncMap(nonce, syncMap, src) -} - -func (nspc *nonceSyncMapCacher) copySyncMap(nonce uint64, dest dataRetriever.ShardIdHashMap, src dataRetriever.ShardIdHashMap) { - src.Range(func(shardId uint32, hash []byte) bool { - existingVal, exists := dest.Load(shardId) - if !exists { - //new key with value - dest.Store(shardId, hash) - nspc.callAddedDataHandlers(nonce, shardId, hash) - return true - } - - if !bytes.Equal(existingVal, hash) { - //value mismatch - dest.Store(shardId, hash) - nspc.callAddedDataHandlers(nonce, shardId, hash) - return true - } - - return true - }) -} - -// Remove removes the nonce-shardId-hash tuple using the nonce and shardId -func (nspc *nonceSyncMapCacher) Remove(nonce uint64, shardId uint32) { - val, ok := nspc.cacher.Peek(nspc.nonceConverter.ToByteSlice(nonce)) - if !ok { - return - } - - syncMap, ok := val.(*ShardIdHashSyncMap) - if !ok { - return - } - - syncMap.Delete(shardId) - nspc.removeNonceFromCacheIfSyncMapIsEmpty(nonce, syncMap) -} - -// RegisterHandler registers a new handler to be called when a new data is added -func (nspc *nonceSyncMapCacher) RegisterHandler(handler func(nonce uint64, shardId uint32, value []byte)) { - if handler == nil { - log.Error("attempt to register a nil handler to a cacher object") - return - } - - nspc.mutAddedDataHandlers.Lock() - nspc.addedDataHandlers = append(nspc.addedDataHandlers, handler) - nspc.mutAddedDataHandlers.Unlock() -} - -func (nspc *nonceSyncMapCacher) callAddedDataHandlers(nonce uint64, shardId uint32, val []byte) { - nspc.mutAddedDataHandlers.RLock() - for _, handler := range nspc.addedDataHandlers { - go handler(nonce, shardId, val) - } - nspc.mutAddedDataHandlers.RUnlock() -} - -// Has returns true if a map is found for provided nonce ans shardId -func (nspc *nonceSyncMapCacher) Has(nonce uint64, shardId uint32) bool { - val, ok := nspc.cacher.Peek(nspc.nonceConverter.ToByteSlice(nonce)) - if !ok { - return false - } - - syncMap, ok := val.(*ShardIdHashSyncMap) - if !ok { - return false - } - - _, exists := syncMap.Load(shardId) - - return exists -} - -// IsInterfaceNil returns true if there is no value under the interface -func (nspc *nonceSyncMapCacher) IsInterfaceNil() bool { - if nspc == nil { - return true - } - return false -} - -func (nspc *nonceSyncMapCacher) removeNonceFromCacheIfSyncMapIsEmpty(nonce uint64, syncMap *ShardIdHashSyncMap) { - isSyncMapEmpty := true - syncMap.Range(func(shardId uint32, hash []byte) bool { - if hash != nil { - isSyncMapEmpty = false - return false - } - return true - }) - - if isSyncMapEmpty { - nspc.cacher.Remove(nspc.nonceConverter.ToByteSlice(nonce)) - } -} diff --git a/dataRetriever/dataPool/nonceSyncMapCacher_test.go b/dataRetriever/dataPool/nonceSyncMapCacher_test.go deleted file mode 100644 index b6ae27a6717..00000000000 --- a/dataRetriever/dataPool/nonceSyncMapCacher_test.go +++ /dev/null @@ -1,509 +0,0 @@ -package dataPool_test - -import ( - "bytes" - "fmt" - "sync" - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go/dataRetriever" - 
"github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" - "github.com/ElrondNetwork/elrond-go/dataRetriever/mock" - "github.com/stretchr/testify/assert" -) - -var timeToWaitAddedCallback = time.Second - -func TestNewNonceSyncMapCacher_NilCacherShouldErr(t *testing.T) { - t.Parallel() - - nsmc, err := dataPool.NewNonceSyncMapCacher(nil, &mock.Uint64ByteSliceConverterMock{}) - - assert.Nil(t, nsmc) - assert.Equal(t, dataRetriever.ErrNilCacher, err) -} - -func TestNewNonceSyncMapCacher_SliceConverterShouldErr(t *testing.T) { - t.Parallel() - - nsmc, err := dataPool.NewNonceSyncMapCacher(&mock.CacherStub{}, nil) - - assert.Nil(t, nsmc) - assert.Equal(t, dataRetriever.ErrNilUint64ByteSliceConverter, err) -} - -func TestNewNonceSyncMapCacher_ShouldWork(t *testing.T) { - t.Parallel() - - nsmc, err := dataPool.NewNonceSyncMapCacher(&mock.CacherStub{}, &mock.Uint64ByteSliceConverterMock{}) - - assert.NotNil(t, nsmc) - assert.Nil(t, err) -} - -//------- Clear - -func TestNonceSyncMapCacher_Clear(t *testing.T) { - t.Parallel() - - wasCalled := false - nsmc, _ := dataPool.NewNonceSyncMapCacher( - &mock.CacherStub{ - ClearCalled: func() { - wasCalled = true - }, - }, - &mock.Uint64ByteSliceConverterMock{}) - - nsmc.Clear() - - assert.True(t, wasCalled) -} - -//------- Get - -func TestNonceSyncMapGet_GetWhenMissingShouldReturnFalseNil(t *testing.T) { - t.Parallel() - - nsmc, _ := dataPool.NewNonceSyncMapCacher( - &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false - }, - }, - &mock.Uint64ByteSliceConverterMock{ - ToByteSliceCalled: func(u uint64) []byte { - return make([]byte, 0) - }, - }) - - syncMap, ok := nsmc.Get(0) - - assert.False(t, ok) - assert.Nil(t, syncMap) -} - -func TestNonceSyncMapGet_GetWithWrongObjectTypeShouldReturnFalseNil(t *testing.T) { - t.Parallel() - - nsmc, _ := dataPool.NewNonceSyncMapCacher( - &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return "wrong object type", true - }, - }, - &mock.Uint64ByteSliceConverterMock{ - ToByteSliceCalled: func(u uint64) []byte { - return make([]byte, 0) - }, - }) - - syncMap, ok := nsmc.Get(0) - - assert.False(t, ok) - assert.Nil(t, syncMap) -} - -func TestNonceSyncMapGet_GetShouldReturnTruePointer(t *testing.T) { - t.Parallel() - - existingMap := &dataPool.ShardIdHashSyncMap{} - nsmc, _ := dataPool.NewNonceSyncMapCacher( - &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return existingMap, true - }, - }, - &mock.Uint64ByteSliceConverterMock{ - ToByteSliceCalled: func(u uint64) []byte { - return make([]byte, 0) - }, - }) - - syncMap, ok := nsmc.Get(0) - - assert.True(t, ok) - assert.True(t, syncMap == existingMap) -} - -//------- RegisterHandler - -func TestNonceSyncMapGet_RegisterHandlerWithNilPointerShouldNotAddHandler(t *testing.T) { - t.Parallel() - - nsmc, _ := dataPool.NewNonceSyncMapCacher(&mock.CacherStub{}, &mock.Uint64ByteSliceConverterMock{}) - - nsmc.RegisterHandler(nil) - handlers := nsmc.GetAddedHandlers() - - assert.Empty(t, handlers) -} - -func TestNonceSyncMapGet_RegisterHandlerShouldAddHandler(t *testing.T) { - t.Parallel() - - nsmc, _ := dataPool.NewNonceSyncMapCacher(&mock.CacherStub{}, &mock.Uint64ByteSliceConverterMock{}) - - handler := func(nonce uint64, shardId uint32, value []byte) {} - - nsmc.RegisterHandler(handler) - handlers := nsmc.GetAddedHandlers() - - assert.Equal(t, 1, len(handlers)) - assert.NotNil(t, handlers[0]) -} - -//------- Remove - -func 
TestNonceSyncMapCacher_RemoveWhenSyncMapDoesNotExistsShouldNotPanic(t *testing.T) { - t.Parallel() - - defer func() { - r := recover() - if r != nil { - assert.Fail(t, fmt.Sprintf("should have not fail: %v", r)) - } - }() - - shardId := uint32(89) - nonce := uint64(45) - - nsmc, _ := dataPool.NewNonceSyncMapCacher( - &mock.CacherStub{ - PeekCalled: func(key []byte) (interface{}, bool) { - return nil, false - }, - }, - mock.NewNonceHashConverterMock()) - - nsmc.Remove(nonce, shardId) -} - -func TestNonceSyncMapCacher_RemoveShardIdWhenExists(t *testing.T) { - t.Parallel() - - shardId := uint32(89) - nonce := uint64(45) - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(shardId, []byte("hash")) - - nsmc, _ := dataPool.NewNonceSyncMapCacher( - &mock.CacherStub{ - PeekCalled: func(key []byte) (interface{}, bool) { - return syncMap, true - }, - RemoveCalled: func(key []byte) {}, - }, - mock.NewNonceHashConverterMock()) - - nsmc.Remove(nonce, shardId) - - syncMap.Range(func(shardId uint32, hash []byte) bool { - assert.Fail(t, "should have not found the existing key") - return false - }) -} - -//------- Merge - -func TestNonceSyncMapCacher_MergeNilSrcShouldIgnore(t *testing.T) { - t.Parallel() - - cacher := mock.NewCacherMock() - nsmc, _ := dataPool.NewNonceSyncMapCacher(cacher, &mock.Uint64ByteSliceConverterMock{}) - - addedWasCalled := make(chan struct{}) - handler := func(nonce uint64, shardId uint32, value []byte) { - addedWasCalled <- struct{}{} - } - - nsmc.RegisterHandler(handler) - nonce := uint64(40) - nsmc.Merge(nonce, nil) - - assert.Equal(t, 0, cacher.Len()) - - select { - case <-addedWasCalled: - assert.Fail(t, "should have not called added") - case <-time.After(timeToWaitAddedCallback): - } -} - -func TestNonceSyncMapCacher_MergeWithEmptyMapShouldCreatesAnEmptyMapAndNotCallsAdded(t *testing.T) { - t.Parallel() - - cacher := mock.NewCacherMock() - nsmc, _ := dataPool.NewNonceSyncMapCacher(cacher, mock.NewNonceHashConverterMock()) - - addedWasCalled := make(chan struct{}) - handler := func(nonce uint64, shardId uint32, value []byte) { - addedWasCalled <- struct{}{} - } - - nonce := uint64(40) - nsmc.RegisterHandler(handler) - nsmc.Merge(nonce, &dataPool.ShardIdHashSyncMap{}) - - assert.Equal(t, 1, cacher.Len()) - - retrievedMap, ok := nsmc.Get(nonce) - - assert.True(t, ok) - numExpectedValsInSyncMap := 0 - testRetrievedMapAndAddedNotCalled(t, retrievedMap, addedWasCalled, numExpectedValsInSyncMap) -} - -func TestNonceSyncMapCacher_MergeWithExistingKeyAndValShouldNotMergeOrCallAdded(t *testing.T) { - t.Parallel() - - cacher := mock.NewCacherMock() - nonceConverter := mock.NewNonceHashConverterMock() - nsmc, _ := dataPool.NewNonceSyncMapCacher(cacher, nonceConverter) - - addedWasCalled := make(chan struct{}) - handler := func(nonce uint64, shardId uint32, value []byte) { - addedWasCalled <- struct{}{} - } - - nonce := uint64(40) - shardId := uint32(7) - val := []byte("value") - existingMap := &dataPool.ShardIdHashSyncMap{} - existingMap.Store(shardId, val) - nonceHash := nonceConverter.ToByteSlice(nonce) - cacher.Put(nonceHash, existingMap) - - nsmc.RegisterHandler(handler) - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(shardId, val) - nsmc.Merge(nonce, syncMap) - - assert.Equal(t, 1, cacher.Len()) - - retrievedMap, ok := nsmc.Get(nonce) - - assert.True(t, ok) - numExpectedValsInSyncMap := 1 - testRetrievedMapAndAddedNotCalled(t, retrievedMap, addedWasCalled, numExpectedValsInSyncMap) -} - -func TestNonceSyncMapCacher_MergeWithNewKeyShouldMergeAndCallAdded(t 
*testing.T) { - t.Parallel() - - cacher := mock.NewCacherMock() - nonceConverter := mock.NewNonceHashConverterMock() - nsmc, _ := dataPool.NewNonceSyncMapCacher(cacher, nonceConverter) - - nonce := uint64(40) - shardId := uint32(7) - val := []byte("value") - - addedWasCalled := make(chan struct{}) - handler := func(addedNonce uint64, addedShardId uint32, addedValue []byte) { - if addedNonce != nonce { - assert.Fail(t, "invalid nonce retrieved") - return - } - if addedShardId != shardId { - assert.Fail(t, "invalid shardID retrieved") - return - } - if !bytes.Equal(addedValue, val) { - assert.Fail(t, "invalid value retrieved") - return - } - - addedWasCalled <- struct{}{} - } - - nsmc.RegisterHandler(handler) - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(shardId, val) - nsmc.Merge(nonce, syncMap) - - assert.Equal(t, 1, cacher.Len()) - - retrievedMap, ok := nsmc.Get(nonce) - - assert.True(t, ok) - numExpectedValsInSyncMap := 1 - testRetrievedMapAndAddedCalled(t, retrievedMap, addedWasCalled, numExpectedValsInSyncMap) -} - -func TestNonceSyncMapCacher_MergeWithExistingKeyButDifferentValShouldOverwriteAndCallAdded(t *testing.T) { - t.Parallel() - - cacher := mock.NewCacherMock() - nonceConverter := mock.NewNonceHashConverterMock() - nsmc, _ := dataPool.NewNonceSyncMapCacher(cacher, nonceConverter) - - nonce := uint64(40) - shardId := uint32(7) - val := []byte("value") - existingMap := &dataPool.ShardIdHashSyncMap{} - existingMap.Store(shardId, val) - nonceHash := nonceConverter.ToByteSlice(nonce) - cacher.Put(nonceHash, existingMap) - - newVal := []byte("new value") - - addedWasCalled := make(chan struct{}) - handler := func(addedNonce uint64, addedShardId uint32, addedValue []byte) { - if addedNonce != nonce { - assert.Fail(t, "invalid nonce retrieved") - return - } - if addedShardId != shardId { - assert.Fail(t, "invalid shardID retrieved") - return - } - if !bytes.Equal(addedValue, newVal) { - assert.Fail(t, "invalid value retrieved") - return - } - - addedWasCalled <- struct{}{} - } - - nsmc.RegisterHandler(handler) - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(shardId, newVal) - nsmc.Merge(nonce, syncMap) - - assert.Equal(t, 1, cacher.Len()) - - retrievedMap, ok := nsmc.Get(nonce) - - assert.True(t, ok) - numExpectedValsInSyncMap := 1 - testRetrievedMapAndAddedCalled(t, retrievedMap, addedWasCalled, numExpectedValsInSyncMap) -} - -func TestNonceSyncMapCacher_MergeInConcurrentialSettingShouldWork(t *testing.T) { - t.Parallel() - - cacher := mock.NewCacherMock() - nonceConverter := mock.NewNonceHashConverterMock() - nsmc, _ := dataPool.NewNonceSyncMapCacher(cacher, nonceConverter) - - wg := sync.WaitGroup{} - nsmc.RegisterHandler(func(nonce uint64, shardId uint32, value []byte) { - wg.Done() - }) - - maxNonces := 100 - maxShards := 25 - wg.Add(maxNonces * maxShards) - - chDone := make(chan struct{}) - go func() { - wg.Wait() - chDone <- struct{}{} - }() - - for nonce := uint64(0); nonce < uint64(maxNonces); nonce++ { - for shardId := uint32(0); shardId < uint32(maxShards); shardId++ { - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(shardId, make([]byte, 0)) - - go nsmc.Merge(nonce, syncMap) - } - } - - select { - case <-chDone: - case <-time.After(timeToWaitAddedCallback * 10): - assert.Fail(t, "timeout receiving all callbacks") - return - } - - for nonce := uint64(0); nonce < uint64(maxNonces); nonce++ { - retrievedMap, ok := nsmc.Get(nonce) - - assert.True(t, ok) - valsInsideMap := 0 - retrievedMap.Range(func(shardId uint32, hash []byte) bool { - 
valsInsideMap++ - return true - }) - - assert.Equal(t, maxShards, valsInsideMap) - } -} - -//------- Has - -func TestNonceSyncMapCacher_HasNotFoundShouldRetFalse(t *testing.T) { - t.Parallel() - - cacher := mock.NewCacherMock() - nonceConverter := mock.NewNonceHashConverterMock() - nsmc, _ := dataPool.NewNonceSyncMapCacher(cacher, nonceConverter) - - inexistentNonce := uint64(67) - has := nsmc.Has(inexistentNonce, 0) - - assert.False(t, has) -} - -func TestNonceSyncMapCacher_HasFoundShouldRetTrue(t *testing.T) { - t.Parallel() - - cacher := mock.NewCacherMock() - nonceConverter := mock.NewNonceHashConverterMock() - nsmc, _ := dataPool.NewNonceSyncMapCacher(cacher, nonceConverter) - - nonce := uint64(67) - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(0, []byte("X")) - nsmc.Merge(nonce, syncMap) - has := nsmc.Has(nonce, 0) - - assert.True(t, has) -} - -func testRetrievedMapAndAddedNotCalled( - t *testing.T, - retrievedMap dataRetriever.ShardIdHashMap, - addedWasCalled chan struct{}, - numExpectedValsInSyncMap int, -) { - - foundVals := 0 - retrievedMap.Range(func(shardId uint32, hash []byte) bool { - foundVals++ - return true - }) - - assert.Equal(t, numExpectedValsInSyncMap, foundVals) - - select { - case <-addedWasCalled: - assert.Fail(t, "should have not called added") - case <-time.After(timeToWaitAddedCallback): - } -} - -func testRetrievedMapAndAddedCalled( - t *testing.T, - retrievedMap dataRetriever.ShardIdHashMap, - addedWasCalled chan struct{}, - numExpectedValsInSyncMap int, -) { - - foundVals := 0 - retrievedMap.Range(func(shardId uint32, hash []byte) bool { - foundVals++ - return true - }) - - assert.Equal(t, numExpectedValsInSyncMap, foundVals) - - select { - case <-addedWasCalled: - case <-time.After(timeToWaitAddedCallback): - assert.Fail(t, "should have called added") - } -} diff --git a/dataRetriever/dataPool/shardDataPool.go b/dataRetriever/dataPool/shardDataPool.go index 13e47cebe56..24b12715c1f 100644 --- a/dataRetriever/dataPool/shardDataPool.go +++ b/dataRetriever/dataPool/shardDataPool.go @@ -1,6 +1,7 @@ package dataPool import ( + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/storage" ) @@ -9,11 +10,10 @@ type shardedDataPool struct { transactions dataRetriever.ShardedDataCacherNotifier unsignedTransactions dataRetriever.ShardedDataCacherNotifier rewardTransactions dataRetriever.ShardedDataCacherNotifier - headers storage.Cacher - metaBlocks storage.Cacher - headersNonces dataRetriever.Uint64SyncMapCacher + headers dataRetriever.HeadersPool miniBlocks storage.Cacher peerChangesBlocks storage.Cacher + trieNodes storage.Cacher currBlockTxs dataRetriever.TransactionCacher } @@ -22,51 +22,46 @@ func NewShardedDataPool( transactions dataRetriever.ShardedDataCacherNotifier, unsignedTransactions dataRetriever.ShardedDataCacherNotifier, rewardTransactions dataRetriever.ShardedDataCacherNotifier, - headers storage.Cacher, - headersNonces dataRetriever.Uint64SyncMapCacher, + headers dataRetriever.HeadersPool, miniBlocks storage.Cacher, peerChangesBlocks storage.Cacher, - metaBlocks storage.Cacher, + trieNodes storage.Cacher, currBlockTxs dataRetriever.TransactionCacher, ) (*shardedDataPool, error) { - if transactions == nil || transactions.IsInterfaceNil() { + if check.IfNil(transactions) { return nil, dataRetriever.ErrNilTxDataPool } - if unsignedTransactions == nil || unsignedTransactions.IsInterfaceNil() { + if check.IfNil(unsignedTransactions) { return nil, 
dataRetriever.ErrNilUnsignedTransactionPool } - if rewardTransactions == nil || rewardTransactions.IsInterfaceNil() { + if check.IfNil(rewardTransactions) { return nil, dataRetriever.ErrNilRewardTransactionPool } - if headers == nil || headers.IsInterfaceNil() { + if check.IfNil(headers) { return nil, dataRetriever.ErrNilHeadersDataPool } - if headersNonces == nil || headersNonces.IsInterfaceNil() { - return nil, dataRetriever.ErrNilHeadersNoncesDataPool - } - if miniBlocks == nil || miniBlocks.IsInterfaceNil() { + if check.IfNil(miniBlocks) { return nil, dataRetriever.ErrNilTxBlockDataPool } - if peerChangesBlocks == nil || peerChangesBlocks.IsInterfaceNil() { + if check.IfNil(peerChangesBlocks) { return nil, dataRetriever.ErrNilPeerChangeBlockDataPool } - if metaBlocks == nil || metaBlocks.IsInterfaceNil() { - return nil, dataRetriever.ErrNilMetaBlockPool - } - if currBlockTxs == nil || currBlockTxs.IsInterfaceNil() { + if check.IfNil(currBlockTxs) { return nil, dataRetriever.ErrNilCurrBlockTxs } + if trieNodes == nil || trieNodes.IsInterfaceNil() { + return nil, dataRetriever.ErrNilTrieNodesPool + } return &shardedDataPool{ transactions: transactions, unsignedTransactions: unsignedTransactions, rewardTransactions: rewardTransactions, headers: headers, - headersNonces: headersNonces, miniBlocks: miniBlocks, peerChangesBlocks: peerChangesBlocks, - metaBlocks: metaBlocks, + trieNodes: trieNodes, currBlockTxs: currBlockTxs, }, nil } @@ -92,16 +87,10 @@ func (tdp *shardedDataPool) RewardTransactions() dataRetriever.ShardedDataCacher } // Headers returns the holder for headers -func (tdp *shardedDataPool) Headers() storage.Cacher { +func (tdp *shardedDataPool) Headers() dataRetriever.HeadersPool { return tdp.headers } -// HeadersNonces returns the holder nonce-block hash pairs. 
It will hold both shard headers nonce-hash pairs -// also metachain header nonce-hash pairs -func (tdp *shardedDataPool) HeadersNonces() dataRetriever.Uint64SyncMapCacher { - return tdp.headersNonces -} - // MiniBlocks returns the holder for miniblocks func (tdp *shardedDataPool) MiniBlocks() storage.Cacher { return tdp.miniBlocks @@ -112,9 +101,9 @@ func (tdp *shardedDataPool) PeerChangesBlocks() storage.Cacher { return tdp.peerChangesBlocks } -// MetaBlocks returns the holder for meta blocks -func (tdp *shardedDataPool) MetaBlocks() storage.Cacher { - return tdp.metaBlocks +// TrieNodes returns the holder for trie nodes +func (tdp *shardedDataPool) TrieNodes() storage.Cacher { + return tdp.trieNodes } // IsInterfaceNil returns true if there is no value under the interface diff --git a/dataRetriever/dataPool/shardDataPool_test.go b/dataRetriever/dataPool/shardDataPool_test.go index 533fa2f73b9..91c5925276e 100644 --- a/dataRetriever/dataPool/shardDataPool_test.go +++ b/dataRetriever/dataPool/shardDataPool_test.go @@ -18,8 +18,7 @@ func TestNewShardedDataPool_NilTransactionsShouldErr(t *testing.T) { nil, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, - &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, + &mock.HeadersCacherStub{}, &mock.CacherStub{}, &mock.CacherStub{}, &mock.CacherStub{}, @@ -37,8 +36,7 @@ func TestNewShardedDataPool_NilUnsignedTransactionsShouldErr(t *testing.T) { &mock.ShardedDataStub{}, nil, &mock.ShardedDataStub{}, - &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, + &mock.HeadersCacherStub{}, &mock.CacherStub{}, &mock.CacherStub{}, &mock.CacherStub{}, @@ -56,8 +54,7 @@ func TestNewShardedDataPool_NilRewardTransactionsShouldErr(t *testing.T) { &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, nil, - &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, + &mock.HeadersCacherStub{}, &mock.CacherStub{}, &mock.CacherStub{}, &mock.CacherStub{}, @@ -76,7 +73,6 @@ func TestNewShardedDataPool_NilHeadersShouldErr(t *testing.T) { &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, nil, - &mock.Uint64SyncMapCacherStub{}, &mock.CacherStub{}, &mock.CacherStub{}, &mock.CacherStub{}, @@ -87,25 +83,6 @@ func TestNewShardedDataPool_NilHeadersShouldErr(t *testing.T) { assert.Nil(t, tdp) } -func TestNewShardedDataPool_NilHeaderNoncesShouldErr(t *testing.T) { - t.Parallel() - - tdp, err := dataPool.NewShardedDataPool( - &mock.ShardedDataStub{}, - &mock.ShardedDataStub{}, - &mock.ShardedDataStub{}, - &mock.CacherStub{}, - nil, - &mock.CacherStub{}, - &mock.CacherStub{}, - &mock.CacherStub{}, - &mock.TxForCurrentBlockStub{}, - ) - - assert.Equal(t, dataRetriever.ErrNilHeadersNoncesDataPool, err) - assert.Nil(t, tdp) -} - func TestNewShardedDataPool_NilTxBlocksShouldErr(t *testing.T) { t.Parallel() @@ -113,8 +90,7 @@ func TestNewShardedDataPool_NilTxBlocksShouldErr(t *testing.T) { &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, - &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, + &mock.HeadersCacherStub{}, nil, &mock.CacherStub{}, &mock.CacherStub{}, @@ -125,41 +101,39 @@ func TestNewShardedDataPool_NilTxBlocksShouldErr(t *testing.T) { assert.Nil(t, tdp) } -func TestNewShardedDataPool_NilPeerBlocksShouldErr(t *testing.T) { +func TestNewShardedDataPool_NilTrieNodesShouldErr(t *testing.T) { t.Parallel() tdp, err := dataPool.NewShardedDataPool( &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, + &mock.HeadersCacherStub{}, &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, &mock.CacherStub{}, nil, - &mock.CacherStub{}, 
&mock.TxForCurrentBlockStub{}, ) - assert.Equal(t, dataRetriever.ErrNilPeerChangeBlockDataPool, err) + assert.Equal(t, dataRetriever.ErrNilTrieNodesPool, err) assert.Nil(t, tdp) } -func TestNewShardedDataPool_NilMetaBlocksShouldErr(t *testing.T) { +func TestNewShardedDataPool_NilPeerBlocksShouldErr(t *testing.T) { t.Parallel() tdp, err := dataPool.NewShardedDataPool( &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, - &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, - &mock.CacherStub{}, + &mock.HeadersCacherStub{}, &mock.CacherStub{}, nil, + &mock.CacherStub{}, &mock.TxForCurrentBlockStub{}, ) - assert.Equal(t, dataRetriever.ErrNilMetaBlockPool, err) + assert.Equal(t, dataRetriever.ErrNilPeerChangeBlockDataPool, err) assert.Nil(t, tdp) } @@ -167,20 +141,19 @@ func TestNewShardedDataPool_OkValsShouldWork(t *testing.T) { transactions := &mock.ShardedDataStub{} scResults := &mock.ShardedDataStub{} rewardTransactions := &mock.ShardedDataStub{} - headers := &mock.CacherStub{} - headerNonces := &mock.Uint64SyncMapCacherStub{} + headers := &mock.HeadersCacherStub{} txBlocks := &mock.CacherStub{} peersBlock := &mock.CacherStub{} - metaChainBlocks := &mock.CacherStub{} + trieNodes := &mock.CacherStub{} + tdp, err := dataPool.NewShardedDataPool( transactions, scResults, rewardTransactions, headers, - headerNonces, txBlocks, peersBlock, - metaChainBlocks, + trieNodes, &mock.TxForCurrentBlockStub{}, ) @@ -190,9 +163,7 @@ func TestNewShardedDataPool_OkValsShouldWork(t *testing.T) { assert.True(t, scResults == tdp.UnsignedTransactions()) assert.True(t, rewardTransactions == tdp.RewardTransactions()) assert.True(t, headers == tdp.Headers()) - assert.True(t, headerNonces == tdp.HeadersNonces()) assert.True(t, txBlocks == tdp.MiniBlocks()) assert.True(t, peersBlock == tdp.PeerChangesBlocks()) - assert.True(t, metaChainBlocks == tdp.MetaBlocks()) assert.True(t, scResults == tdp.UnsignedTransactions()) } diff --git a/dataRetriever/dataPool/shardIdHashSyncMap.go b/dataRetriever/dataPool/shardIdHashSyncMap.go deleted file mode 100644 index 1eaa3eadc58..00000000000 --- a/dataRetriever/dataPool/shardIdHashSyncMap.go +++ /dev/null @@ -1,62 +0,0 @@ -package dataPool - -import "sync" - -// ShardIdHashSyncMap is a simple wrapper over a sync map -// that has specialized methods for load, store, range and so on -type ShardIdHashSyncMap struct { - innerMap sync.Map -} - -// Load returns the hash stored in the map for a shardId, or nil if no -// value is present. -// The ok result indicates whether value was found in the map. -func (sihsm *ShardIdHashSyncMap) Load(shardId uint32) ([]byte, bool) { - value, ok := sihsm.innerMap.Load(shardId) - if !ok { - return nil, ok - } - - return value.([]byte), ok -} - -// Store sets the hash for a shardId. -func (sihsm *ShardIdHashSyncMap) Store(shardId uint32, hash []byte) { - sihsm.innerMap.Store(shardId, hash) -} - -// Range calls f sequentially for each shardId and hash present in the map. -// If f returns false, range stops the iteration. -// -// Range does not necessarily correspond to any consistent snapshot of the Map's -// contents: no key will be visited more than once, but if the value for any key -// is stored or deleted concurrently, Range may reflect any mapping for that key -// from any point during the Range call. -// -// Range may be O(N) with the number of elements in the map even if f returns -// false after a constant number of calls. 
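As a point of reference for the deletion above, this is roughly how the ShardIdHashSyncMap wrapper was used before this change: a typed facade over sync.Map keyed by shard ID, so concurrent Store/Load/Range calls needed no extra locking. A minimal usage sketch (assuming the pre-change import path, which this diff removes):

package main

import (
	"fmt"

	"github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool"
)

func main() {
	sihsm := dataPool.ShardIdHashSyncMap{}

	// Store header hashes keyed by shard ID; safe for concurrent use.
	sihsm.Store(0, []byte("header hash for shard 0"))
	sihsm.Store(1, []byte("header hash for shard 1"))

	if hash, ok := sihsm.Load(0); ok {
		fmt.Printf("shard 0 -> %s\n", hash)
	}

	// Range visits every stored (shardId, hash) pair; returning false stops the iteration.
	sihsm.Range(func(shardId uint32, hash []byte) bool {
		fmt.Printf("shard %d -> %s\n", shardId, hash)
		return true
	})
}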
-func (sihsm *ShardIdHashSyncMap) Range(f func(shardId uint32, hash []byte) bool) { - if f == nil { - return - } - - sihsm.innerMap.Range(func(key, value interface{}) bool { - uint32Key := key.(uint32) - bytesValue := value.([]byte) - - return f(uint32Key, bytesValue) - }) -} - -// Delete deletes the value for a key. -func (sihsm *ShardIdHashSyncMap) Delete(shardId uint32) { - sihsm.innerMap.Delete(shardId) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (sihsm *ShardIdHashSyncMap) IsInterfaceNil() bool { - if sihsm == nil { - return true - } - return false -} diff --git a/dataRetriever/dataPool/shardIdHashSyncMap_test.go b/dataRetriever/dataPool/shardIdHashSyncMap_test.go deleted file mode 100644 index f60e8848107..00000000000 --- a/dataRetriever/dataPool/shardIdHashSyncMap_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package dataPool_test - -import ( - "fmt" - "sync" - "testing" - - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" - "github.com/stretchr/testify/assert" -) - -func TestShardIdHashSyncMap_StoreLoadShouldWork(t *testing.T) { - t.Parallel() - - sihsm := dataPool.ShardIdHashSyncMap{} - - shardIds := []uint32{45, 38, 56} - hashes := [][]byte{[]byte("hash1"), []byte("hash2"), []byte("hash3")} - - for idx := 0; idx < len(shardIds); idx++ { - sihsm.Store(shardIds[idx], hashes[idx]) - - retrievedHash, ok := sihsm.Load(shardIds[idx]) - - assert.True(t, ok) - assert.Equal(t, hashes[idx], retrievedHash) - } -} - -func TestShardIdHashSyncMap_LoadingNilHash(t *testing.T) { - t.Parallel() - - sihsm := dataPool.ShardIdHashSyncMap{} - - shardId := uint32(67) - sihsm.Store(shardId, nil) - - retrievedHash, ok := sihsm.Load(shardId) - - assert.True(t, ok) - assert.Nil(t, retrievedHash) -} - -func TestShardIdHashSyncMap_LoadingNotExistingElement(t *testing.T) { - t.Parallel() - - sihsm := dataPool.ShardIdHashSyncMap{} - - shardId := uint32(67) - retrievedHash, ok := sihsm.Load(shardId) - - assert.False(t, ok) - assert.Nil(t, retrievedHash) -} - -func TestShardIdHashSyncMap_RangeWithNilHandlerShouldNotPanic(t *testing.T) { - t.Parallel() - - defer func() { - r := recover() - if r != nil { - assert.Fail(t, fmt.Sprintf("should have not paniced: %v", r)) - } - }() - - sihsm := dataPool.ShardIdHashSyncMap{} - - shardId := uint32(67) - hash := []byte("hash") - sihsm.Store(shardId, hash) - - sihsm.Range(nil) -} - -func TestShardIdHashSyncMap_RangeShouldWork(t *testing.T) { - t.Parallel() - - sihsm := dataPool.ShardIdHashSyncMap{} - - shardIds := []uint32{45, 38, 56} - hashes := [][]byte{[]byte("hash1"), []byte("hash2"), []byte("hash3")} - - mutVisitedMap := sync.Mutex{} - visitedMap := make(map[uint32]bool) - for i := 0; i < len(shardIds); i++ { - visitedMap[shardIds[i]] = false - sihsm.Store(shardIds[i], hashes[i]) - } - - sihsm.Range(func(shardId uint32, hash []byte) bool { - mutVisitedMap.Lock() - defer mutVisitedMap.Unlock() - - visitedMap[shardId] = true - return true - }) - - mutVisitedMap.Lock() - for _, wasVisited := range visitedMap { - assert.True(t, wasVisited) - } - mutVisitedMap.Unlock() - -} - -func TestShardIdHashSyncMap_Delete(t *testing.T) { - t.Parallel() - - sihsm := dataPool.ShardIdHashSyncMap{} - - shardIds := []uint32{45, 38, 56} - hashes := [][]byte{[]byte("hash1"), []byte("hash2"), []byte("hash3")} - - mutVisitedMap := sync.Mutex{} - visitedMap := make(map[uint32]bool) - for i := 0; i < len(shardIds); i++ { - visitedMap[shardIds[i]] = false - sihsm.Store(shardIds[i], hashes[i]) - } - - deletedIndex := 1 - 
sihsm.Delete(shardIds[deletedIndex]) - - sihsm.Range(func(shardId uint32, hash []byte) bool { - mutVisitedMap.Lock() - defer mutVisitedMap.Unlock() - - visitedMap[shardId] = true - return true - }) - - mutVisitedMap.Lock() - for shardId, wasVisited := range visitedMap { - shouldBeVisited := shardId != shardIds[deletedIndex] - assert.Equal(t, shouldBeVisited, wasVisited) - } - mutVisitedMap.Unlock() -} diff --git a/dataRetriever/errors.go b/dataRetriever/errors.go index 7b7b9ba42cf..c503e690f70 100644 --- a/dataRetriever/errors.go +++ b/dataRetriever/errors.go @@ -146,9 +146,18 @@ var ErrNilTxBlockDataPool = errors.New("nil tx block data pool") // ErrNilCacher signals that a nil cache has been provided var ErrNilCacher = errors.New("nil cacher") +// ErrCacheConfigInvalidSize signals that the cache parameter "size" is invalid +var ErrCacheConfigInvalidSize = errors.New("cache parameter [size] is not valid, it must be a positive number") + +// ErrCacheConfigInvalidShards signals that the cache parameter "shards" is invalid +var ErrCacheConfigInvalidShards = errors.New("cache parameter [shards] is not valid, it must be a positive number") + // ErrNilMetaBlockPool signals that a nil meta block data pool was provided var ErrNilMetaBlockPool = errors.New("nil meta block data pool") +// ErrNilTrieNodesPool signals that a nil trie nodes data pool was provided +var ErrNilTrieNodesPool = errors.New("nil trie nodes data pool") + // ErrNilMiniBlockHashesPool signals that a nil meta block data pool was provided var ErrNilMiniBlockHashesPool = errors.New("nil meta block mini block hashes data pool") @@ -173,32 +182,26 @@ var ErrNilDataPacker = errors.New("nil data packer provided") // ErrNilResolverFinder signals that a nil resolver finder has been provided var ErrNilResolverFinder = errors.New("nil resolvers finder") -// ErrEmptyTxRequestTopic signals that an empty transaction topic has been provided -var ErrEmptyTxRequestTopic = errors.New("empty transaction request topic") - -// ErrEmptyScrRequestTopic signals that an empty smart contract result topic has been provided -var ErrEmptyScrRequestTopic = errors.New("empty smart contract result request topic") - -// ErrEmptyRewardTxRequestTopic signals that an empty reward transaction topic has been provided -var ErrEmptyRewardTxRequestTopic = errors.New("empty rewards transactions request topic") - -// ErrEmptyMiniBlockRequestTopic signals that an empty miniblock topic has been provided -var ErrEmptyMiniBlockRequestTopic = errors.New("empty miniblock request topic") - -// ErrEmptyShardHeaderRequestTopic signals that an empty shard header topic has been provided -var ErrEmptyShardHeaderRequestTopic = errors.New("empty shard header request topic") - -// ErrEmptyMetaHeaderRequestTopic signals that an empty meta header topic has been provided -var ErrEmptyMetaHeaderRequestTopic = errors.New("empty meta header request topic") - // ErrInvalidMaxTxRequest signals that max tx request is too small var ErrInvalidMaxTxRequest = errors.New("max tx request number is invalid") // ErrNilPeerListCreator signals that a nil peer list creator implementation has been provided var ErrNilPeerListCreator = errors.New("nil peer list creator provided") +// ErrNilTrieDataGetter signals that a nil trie data getter has been provided +var ErrNilTrieDataGetter = errors.New("nil trie data getter provided") + +// ErrEmptyTrieNodesRequestTopic signals that an empty trie nodes topic has been provided +var ErrEmptyTrieNodesRequestTopic = errors.New("empty trie nodes request topic") + // 
ErrNilCurrBlockTxs signals that nil current blocks txs holder was provided var ErrNilCurrBlockTxs = errors.New("nil current block txs holder") // ErrNilRequestedItemsHandler signals that a nil requested items handler was provided var ErrNilRequestedItemsHandler = errors.New("nil requested items handler") + +// ErrNilEpochHandler signals that epoch handler is nil +var ErrNilEpochHandler = errors.New("nil epoch handler") + +// ErrBadRequest signals that the request should not have happened +var ErrBadRequest = errors.New("request should not be done as it doesn't follow the protocol") diff --git a/dataRetriever/factory/metachain/resolversContainerFactory.go b/dataRetriever/factory/metachain/resolversContainerFactory.go index 7e324031afc..613e6999442 100644 --- a/dataRetriever/factory/metachain/resolversContainerFactory.go +++ b/dataRetriever/factory/metachain/resolversContainerFactory.go @@ -1,6 +1,7 @@ package metachain import ( + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/core/random" "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -23,6 +24,7 @@ type resolversContainerFactory struct { uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter intRandomizer dataRetriever.IntRandomizer dataPacker dataRetriever.DataPacker + trieDataGetter dataRetriever.TrieDataGetter } // NewResolversContainerFactory creates a new container filled with topic resolvers @@ -34,29 +36,37 @@ func NewResolversContainerFactory( dataPools dataRetriever.MetaPoolsHolder, uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter, dataPacker dataRetriever.DataPacker, + trieDataGetter dataRetriever.TrieDataGetter, + sizeCheckDelta uint32, ) (*resolversContainerFactory, error) { - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + if check.IfNil(shardCoordinator) { return nil, dataRetriever.ErrNilShardCoordinator } - if messenger == nil || messenger.IsInterfaceNil() { + if check.IfNil(messenger) { return nil, dataRetriever.ErrNilMessenger } - if store == nil || store.IsInterfaceNil() { + if check.IfNil(store) { return nil, dataRetriever.ErrNilStore } - if marshalizer == nil || marshalizer.IsInterfaceNil() { + if check.IfNil(marshalizer) { return nil, dataRetriever.ErrNilMarshalizer } - if dataPools == nil || dataPools.IsInterfaceNil() { + if sizeCheckDelta > 0 { + marshalizer = marshal.NewSizeCheckUnmarshalizer(marshalizer, sizeCheckDelta) + } + if check.IfNil(dataPools) { return nil, dataRetriever.ErrNilDataPoolHolder } - if uint64ByteSliceConverter == nil || uint64ByteSliceConverter.IsInterfaceNil() { + if check.IfNil(uint64ByteSliceConverter) { return nil, dataRetriever.ErrNilUint64ByteSliceConverter } - if dataPacker == nil || dataPacker.IsInterfaceNil() { + if check.IfNil(dataPacker) { return nil, dataRetriever.ErrNilDataPacker } + if trieDataGetter == nil || trieDataGetter.IsInterfaceNil() { + return nil, dataRetriever.ErrNilTrieDataGetter + } return &resolversContainerFactory{ shardCoordinator: shardCoordinator, @@ -67,6 +77,7 @@ func NewResolversContainerFactory( uint64ByteSliceConverter: uint64ByteSliceConverter, intRandomizer: &random.ConcurrentSafeIntRandomizer{}, dataPacker: dataPacker, + trieDataGetter: trieDataGetter, }, nil } @@ -127,6 +138,15 @@ func (rcf *resolversContainerFactory) Create() (dataRetriever.ResolversContainer return nil, err } + keys, resolverSlice, err = rcf.generateTrieNodesResolver() + if err != nil { + return nil, err + } + err = 
container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + return container, nil } @@ -152,11 +172,10 @@ func (rcf *resolversContainerFactory) generateShardHeaderResolvers() ([]string, keys := make([]string, noOfShards) resolverSlice := make([]dataRetriever.Resolver, noOfShards) - //wire up to topics: shardHeadersForMetachain_0_META, shardHeadersForMetachain_1_META ... + //wire up to topics: shardBlocks_0_META, shardBlocks_1_META ... for idx := uint32(0); idx < noOfShards; idx++ { - identifierHeader := factory.ShardHeadersForMetachainTopic + shardC.CommunicationIdentifier(idx) - // TODO: Should fix this to ask only other shard peers - excludePeersFromTopic := factory.ShardHeadersForMetachainTopic + shardC.CommunicationIdentifier(shardC.SelfId()) + identifierHeader := factory.ShardBlocksTopic + shardC.CommunicationIdentifier(idx) + excludePeersFromTopic := emptyExcludePeersOnTopic resolver, err := rcf.createShardHeaderResolver(identifierHeader, excludePeersFromTopic, idx) if err != nil { @@ -195,8 +214,7 @@ func (rcf *resolversContainerFactory) createShardHeaderResolver(topic string, ex hdrNonceStore := rcf.store.GetStorer(hdrNonceHashDataUnit) resolver, err := resolvers.NewHeaderResolver( resolverSender, - rcf.dataPools.ShardHeaders(), - rcf.dataPools.HeadersNonces(), + rcf.dataPools.Headers(), hdrStorer, hdrNonceStore, rcf.marshalizer, @@ -248,8 +266,7 @@ func (rcf *resolversContainerFactory) createMetaChainHeaderResolver(identifier s hdrNonceStore := rcf.store.GetStorer(dataRetriever.MetaHdrNonceHashDataUnit) resolver, err := resolvers.NewHeaderResolver( resolverSender, - rcf.dataPools.MetaBlocks(), - rcf.dataPools.HeadersNonces(), + rcf.dataPools.Headers(), hdrStorer, hdrNonceStore, rcf.marshalizer, @@ -433,3 +450,50 @@ func (rcf *resolversContainerFactory) IsInterfaceNil() bool { } return false } + +func (rcf *resolversContainerFactory) generateTrieNodesResolver() ([]string, []dataRetriever.Resolver, error) { + shardC := rcf.shardCoordinator + + identifierTrieNodes := factory.TrieNodesTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + + resolver, err := rcf.createTrieNodesResolver(identifierTrieNodes) + if err != nil { + return nil, nil, err + } + + return []string{identifierTrieNodes}, []dataRetriever.Resolver{resolver}, nil +} + +func (rcf *resolversContainerFactory) createTrieNodesResolver(topic string) (dataRetriever.Resolver, error) { + peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, topic, emptyExcludePeersOnTopic) + if err != nil { + return nil, err + } + + resolverSender, err := topicResolverSender.NewTopicResolverSender( + rcf.messenger, + topic, + peerListCreator, + rcf.marshalizer, + rcf.intRandomizer, + rcf.shardCoordinator.SelfId(), + ) + if err != nil { + return nil, err + } + + resolver, err := resolvers.NewTrieNodeResolver( + resolverSender, + rcf.trieDataGetter, + rcf.marshalizer, + ) + if err != nil { + return nil, err + } + + //add on the request topic + return rcf.createTopicAndAssignHandler( + topic+resolverSender.TopicRequestSuffix(), + resolver, + false) +} diff --git a/dataRetriever/factory/metachain/resolversContainerFactory_test.go b/dataRetriever/factory/metachain/resolversContainerFactory_test.go index 34d4a98d079..0b0c182a3f4 100644 --- a/dataRetriever/factory/metachain/resolversContainerFactory_test.go +++ b/dataRetriever/factory/metachain/resolversContainerFactory_test.go @@ -46,18 +46,12 @@ func createStubTopicMessageHandler(matchStrToErrOnCreate string, matchStrToErrOn func 
createDataPools() dataRetriever.MetaPoolsHolder { pools := &mock.MetaPoolsHolderStub{ - ShardHeadersCalled: func() storage.Cacher { - return &mock.CacherStub{} + HeadersCalled: func() dataRetriever.HeadersPool { + return &mock.HeadersCacherStub{} }, MiniBlocksCalled: func() storage.Cacher { return &mock.CacherStub{} }, - MetaBlocksCalled: func() storage.Cacher { - return &mock.CacherStub{} - }, - HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { - return &mock.Uint64SyncMapCacherStub{} - }, TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{} }, @@ -90,6 +84,8 @@ func TestNewResolversContainerFactory_NilShardCoordinatorShouldErr(t *testing.T) createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) assert.Nil(t, rcf) @@ -107,6 +103,8 @@ func TestNewResolversContainerFactory_NilMessengerShouldErr(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) assert.Nil(t, rcf) @@ -124,6 +122,8 @@ func TestNewResolversContainerFactory_NilStoreShouldErr(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) assert.Nil(t, rcf) @@ -141,6 +141,27 @@ func TestNewResolversContainerFactory_NilMarshalizerShouldErr(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, + ) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) +} + +func TestNewResolversContainerFactory_NilMarshalizerAndSizeCheckShouldErr(t *testing.T) { + t.Parallel() + + rcf, err := metachain.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + createStore(), + nil, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + &mock.TrieStub{}, + 1, ) assert.Nil(t, rcf) @@ -158,6 +179,8 @@ func TestNewResolversContainerFactory_NilDataPoolShouldErr(t *testing.T) { nil, &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) assert.Nil(t, rcf) @@ -175,6 +198,8 @@ func TestNewResolversContainerFactory_NilUint64SliceConverterShouldErr(t *testin createDataPools(), nil, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) assert.Nil(t, rcf) @@ -192,12 +217,33 @@ func TestNewResolversContainerFactory_NilDataPackerShouldErr(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, nil, + &mock.TrieStub{}, + 0, ) assert.Nil(t, rcf) assert.Equal(t, dataRetriever.ErrNilDataPacker, err) } +func TestNewResolversContainerFactory_NilTrieDataGetterShouldErr(t *testing.T) { + t.Parallel() + + rcf, err := metachain.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + nil, + 0, + ) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilTrieDataGetter, err) +} + func TestNewResolversContainerFactory_ShouldWork(t *testing.T) { t.Parallel() @@ -209,6 +255,8 @@ func TestNewResolversContainerFactory_ShouldWork(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) assert.NotNil(t, rcf) @@ -222,12 +270,14 @@ func TestResolversContainerFactory_CreateTopicShardHeadersForMetachainFailsShoul rcf, _ := metachain.NewResolversContainerFactory( 
mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler(factory.ShardHeadersForMetachainTopic, ""), + createStubTopicMessageHandler(factory.ShardBlocksTopic, ""), createStore(), &mock.MarshalizerMock{}, createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) container, err := rcf.Create() @@ -241,12 +291,14 @@ func TestResolversContainerFactory_CreateRegisterShardHeadersForMetachainFailsSh rcf, _ := metachain.NewResolversContainerFactory( mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", factory.ShardHeadersForMetachainTopic), + createStubTopicMessageHandler("", factory.ShardBlocksTopic), createStore(), &mock.MarshalizerMock{}, createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) container, err := rcf.Create() @@ -266,6 +318,8 @@ func TestResolversContainerFactory_CreateShouldWork(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) container, err := rcf.Create() @@ -290,6 +344,8 @@ func TestResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) container, _ := rcf.Create() @@ -298,8 +354,9 @@ func TestResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { numResolversMiniBlocks := noOfShards + 1 numResolversUnsigned := noOfShards + 1 numResolversTxs := noOfShards + 1 + numResolversTrieNodes := 1 totalResolvers := numResolversShardHeadersForMetachain + numResolverMetablocks + numResolversMiniBlocks + - numResolversUnsigned + numResolversTxs + numResolversUnsigned + numResolversTxs + numResolversTrieNodes assert.Equal(t, totalResolvers, container.Len()) } diff --git a/dataRetriever/factory/shard/resolversContainerFactory.go b/dataRetriever/factory/shard/resolversContainerFactory.go index a421530178d..9c4ba872ce9 100644 --- a/dataRetriever/factory/shard/resolversContainerFactory.go +++ b/dataRetriever/factory/shard/resolversContainerFactory.go @@ -1,6 +1,7 @@ package shard import ( + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/core/random" "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -23,6 +24,7 @@ type resolversContainerFactory struct { uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter intRandomizer dataRetriever.IntRandomizer dataPacker dataRetriever.DataPacker + trieDataGetter dataRetriever.TrieDataGetter } // NewResolversContainerFactory creates a new container filled with topic resolvers @@ -34,29 +36,37 @@ func NewResolversContainerFactory( dataPools dataRetriever.PoolsHolder, uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter, dataPacker dataRetriever.DataPacker, + trieDataGetter dataRetriever.TrieDataGetter, + sizeCheckDelta uint32, ) (*resolversContainerFactory, error) { - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + if check.IfNil(shardCoordinator) { return nil, dataRetriever.ErrNilShardCoordinator } - if messenger == nil || messenger.IsInterfaceNil() { + if check.IfNil(messenger) { return nil, dataRetriever.ErrNilMessenger } - if store == nil || store.IsInterfaceNil() { + if check.IfNil(store) { return nil, dataRetriever.ErrNilTxStorage } - if marshalizer == nil || marshalizer.IsInterfaceNil() { + if check.IfNil(marshalizer) { return nil, dataRetriever.ErrNilMarshalizer } - if dataPools == nil || 
dataPools.IsInterfaceNil() { + if sizeCheckDelta > 0 { + marshalizer = marshal.NewSizeCheckUnmarshalizer(marshalizer, sizeCheckDelta) + } + if check.IfNil(dataPools) { return nil, dataRetriever.ErrNilDataPoolHolder } - if uint64ByteSliceConverter == nil || uint64ByteSliceConverter.IsInterfaceNil() { + if check.IfNil(uint64ByteSliceConverter) { return nil, dataRetriever.ErrNilUint64ByteSliceConverter } - if dataPacker == nil || dataPacker.IsInterfaceNil() { + if check.IfNil(dataPacker) { return nil, dataRetriever.ErrNilDataPacker } + if trieDataGetter == nil || trieDataGetter.IsInterfaceNil() { + return nil, dataRetriever.ErrNilTrieDataGetter + } return &resolversContainerFactory{ shardCoordinator: shardCoordinator, @@ -67,6 +77,7 @@ func NewResolversContainerFactory( uint64ByteSliceConverter: uint64ByteSliceConverter, intRandomizer: &random.ConcurrentSafeIntRandomizer{}, dataPacker: dataPacker, + trieDataGetter: trieDataGetter, }, nil } @@ -141,7 +152,7 @@ func (rcf *resolversContainerFactory) Create() (dataRetriever.ResolversContainer return nil, err } - keys, resolverSlice, err = rcf.generateMetachainShardHeaderResolver() + keys, resolverSlice, err = rcf.generateMetablockHeaderResolver() if err != nil { return nil, err } @@ -150,7 +161,7 @@ func (rcf *resolversContainerFactory) Create() (dataRetriever.ResolversContainer return nil, err } - keys, resolverSlice, err = rcf.generateMetablockHeaderResolver() + keys, resolverSlice, err = rcf.generateTrieNodesResolver() if err != nil { return nil, err } @@ -255,8 +266,8 @@ func (rcf *resolversContainerFactory) createTxResolver( func (rcf *resolversContainerFactory) generateHdrResolver() ([]string, []dataRetriever.Resolver, error) { shardC := rcf.shardCoordinator - //only one intrashard header topic - identifierHdr := factory.HeadersTopic + shardC.CommunicationIdentifier(shardC.SelfId()) + //only one shard header topic, for example: shardBlocks_0_META + identifierHdr := factory.ShardBlocksTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, identifierHdr, emptyExcludePeersOnTopic) if err != nil { @@ -281,7 +292,6 @@ func (rcf *resolversContainerFactory) generateHdrResolver() ([]string, []dataRet resolver, err := resolvers.NewHeaderResolver( resolverSender, rcf.dataPools.Headers(), - rcf.dataPools.HeadersNonces(), hdrStorer, hdrNonceStore, rcf.marshalizer, @@ -299,21 +309,9 @@ func (rcf *resolversContainerFactory) generateHdrResolver() ([]string, []dataRet return nil, nil, err } - err = rcf.createTopicHeadersForMetachain() - if err != nil { - return nil, nil, err - } - return []string{identifierHdr}, []dataRetriever.Resolver{resolver}, nil } -func (rcf *resolversContainerFactory) createTopicHeadersForMetachain() error { - shardC := rcf.shardCoordinator - identifierHdr := factory.ShardHeadersForMetachainTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) - - return rcf.messenger.CreateTopic(identifierHdr, true) -} - //------- MiniBlocks resolvers func (rcf *resolversContainerFactory) generateMiniBlocksResolvers() ([]string, []dataRetriever.Resolver, error) { @@ -421,59 +419,6 @@ func (rcf *resolversContainerFactory) generatePeerChBlockBodyResolver() ([]strin return []string{identifierPeerCh}, []dataRetriever.Resolver{resolver}, nil } -//------- MetachainShardHeaderResolvers - -func (rcf *resolversContainerFactory) generateMetachainShardHeaderResolver() ([]string, []dataRetriever.Resolver, error) { - shardC := rcf.shardCoordinator - - //only 
one metachain header topic - //example: shardHeadersForMetachain_0_META - identifierHdr := factory.ShardHeadersForMetachainTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) - peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, identifierHdr, emptyExcludePeersOnTopic) - if err != nil { - return nil, nil, err - } - - hdrStorer := rcf.store.GetStorer(dataRetriever.BlockHeaderUnit) - resolverSender, err := topicResolverSender.NewTopicResolverSender( - rcf.messenger, - identifierHdr, - peerListCreator, - rcf.marshalizer, - rcf.intRandomizer, - shardC.SelfId(), - ) - if err != nil { - return nil, nil, err - } - - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardC.SelfId()) - hdrNonceStore := rcf.store.GetStorer(hdrNonceHashDataUnit) - resolver, err := resolvers.NewHeaderResolver( - resolverSender, - rcf.dataPools.Headers(), - rcf.dataPools.HeadersNonces(), - hdrStorer, - hdrNonceStore, - rcf.marshalizer, - rcf.uint64ByteSliceConverter, - ) - if err != nil { - return nil, nil, err - } - - //add on the request topic - _, err = rcf.createTopicAndAssignHandler( - identifierHdr+resolverSender.TopicRequestSuffix(), - resolver, - false) - if err != nil { - return nil, nil, err - } - - return []string{identifierHdr}, []dataRetriever.Resolver{resolver}, nil -} - //------- MetaBlockHeaderResolvers func (rcf *resolversContainerFactory) generateMetablockHeaderResolver() ([]string, []dataRetriever.Resolver, error) { @@ -484,7 +429,7 @@ func (rcf *resolversContainerFactory) generateMetablockHeaderResolver() ([]strin identifierHdr := factory.MetachainBlocksTopic hdrStorer := rcf.store.GetStorer(dataRetriever.MetaBlockUnit) - metaAndCrtShardTopic := factory.ShardHeadersForMetachainTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + metaAndCrtShardTopic := factory.ShardBlocksTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) excludedPeersOnTopic := factory.TransactionTopic + shardC.CommunicationIdentifier(shardC.SelfId()) peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, metaAndCrtShardTopic, excludedPeersOnTopic) @@ -507,8 +452,7 @@ func (rcf *resolversContainerFactory) generateMetablockHeaderResolver() ([]strin hdrNonceStore := rcf.store.GetStorer(dataRetriever.MetaHdrNonceHashDataUnit) resolver, err := resolvers.NewHeaderResolver( resolverSender, - rcf.dataPools.MetaBlocks(), - rcf.dataPools.HeadersNonces(), + rcf.dataPools.Headers(), hdrStorer, hdrNonceStore, rcf.marshalizer, @@ -564,3 +508,66 @@ func (rcf *resolversContainerFactory) IsInterfaceNil() bool { } return false } + +func (rcf *resolversContainerFactory) generateTrieNodesResolver() ([]string, []dataRetriever.Resolver, error) { + shardC := rcf.shardCoordinator + + keys := make([]string, 0) + resolverSlice := make([]dataRetriever.Resolver, 0) + + identifierTrieNodes := factory.TrieNodesTopic + shardC.CommunicationIdentifier(shardC.SelfId()) + + resolver, err := rcf.createTrieNodesResolver(identifierTrieNodes) + if err != nil { + return nil, nil, err + } + + resolverSlice = append(resolverSlice, resolver) + keys = append(keys, identifierTrieNodes) + + identifierTrieNodes = factory.TrieNodesTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + + resolver, err = rcf.createTrieNodesResolver(identifierTrieNodes) + if err != nil { + return nil, nil, err + } + + resolverSlice = append(resolverSlice, resolver) + keys = append(keys, identifierTrieNodes) + + return keys, resolverSlice, nil +} + 
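For orientation, the generateTrieNodesResolver added above registers one resolver on the node's own shard topic and a second one toward the metachain, which is why the 4-shard test later in this diff expects numResolverTrieNodes = 2. A small sketch of how those two topic names are composed, assuming factory.TrieNodesTopic equals "trieNodes" and that CommunicationIdentifier renders "_<self>" for the own shard and "_<self>_META" toward the metachain (consistent with the "shardBlocks_0_META" example used elsewhere in this diff):

package main

import "fmt"

func main() {
	const trieNodesTopic = "trieNodes" // assumed value of factory.TrieNodesTopic
	selfShard := uint32(0)

	intraShardTopic := fmt.Sprintf("%s_%d", trieNodesTopic, selfShard)     // trieNodes_0
	metachainTopic := fmt.Sprintf("%s_%d_META", trieNodesTopic, selfShard) // trieNodes_0_META

	// Two trie-node request topics per shard node.
	fmt.Println(intraShardTopic, metachainTopic)
}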
+func (rcf *resolversContainerFactory) createTrieNodesResolver(topic string) (dataRetriever.Resolver, error) { + peerListCreator, err := topicResolverSender.NewDiffPeerListCreator(rcf.messenger, topic, emptyExcludePeersOnTopic) + if err != nil { + return nil, err + } + + resolverSender, err := topicResolverSender.NewTopicResolverSender( + rcf.messenger, + topic, + peerListCreator, + rcf.marshalizer, + rcf.intRandomizer, + rcf.shardCoordinator.SelfId(), + ) + if err != nil { + return nil, err + } + + resolver, err := resolvers.NewTrieNodeResolver( + resolverSender, + rcf.trieDataGetter, + rcf.marshalizer, + ) + if err != nil { + return nil, err + } + + //add on the request topic + return rcf.createTopicAndAssignHandler( + topic+resolverSender.TopicRequestSuffix(), + resolver, + false) +} diff --git a/dataRetriever/factory/shard/resolversContainerFactory_test.go b/dataRetriever/factory/shard/resolversContainerFactory_test.go index d8de13689f3..793ef197235 100644 --- a/dataRetriever/factory/shard/resolversContainerFactory_test.go +++ b/dataRetriever/factory/shard/resolversContainerFactory_test.go @@ -51,11 +51,8 @@ func createDataPools() dataRetriever.PoolsHolder { pools.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{} } - pools.HeadersCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - return &mock.Uint64SyncMapCacherStub{} + pools.HeadersCalled = func() dataRetriever.HeadersPool { + return &mock.HeadersCacherStub{} } pools.MiniBlocksCalled = func() storage.Cacher { return &mock.CacherStub{} @@ -63,9 +60,6 @@ func createDataPools() dataRetriever.PoolsHolder { pools.PeerChangesBlocksCalled = func() storage.Cacher { return &mock.CacherStub{} } - pools.MetaBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{} - } pools.UnsignedTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{} } @@ -97,6 +91,8 @@ func TestNewResolversContainerFactory_NilShardCoordinatorShouldErr(t *testing.T) createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) assert.Nil(t, rcf) @@ -114,6 +110,8 @@ func TestNewResolversContainerFactory_NilMessengerShouldErr(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) assert.Nil(t, rcf) @@ -131,6 +129,8 @@ func TestNewResolversContainerFactory_NilBlockchainShouldErr(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) assert.Nil(t, rcf) @@ -148,6 +148,27 @@ func TestNewResolversContainerFactory_NilMarshalizerShouldErr(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, + ) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) +} + +func TestNewResolversContainerFactory_NilMarshalizerAndSizeShouldErr(t *testing.T) { + t.Parallel() + + rcf, err := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + createStore(), + nil, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + &mock.TrieStub{}, + 1, ) assert.Nil(t, rcf) @@ -165,6 +186,8 @@ func TestNewResolversContainerFactory_NilDataPoolShouldErr(t *testing.T) { nil, &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) 
assert.Nil(t, rcf) @@ -182,6 +205,8 @@ func TestNewResolversContainerFactory_NilUint64SliceConverterShouldErr(t *testin createDataPools(), nil, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) assert.Nil(t, rcf) @@ -199,12 +224,33 @@ func TestNewResolversContainerFactory_NilSliceSplitterShouldErr(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, nil, + &mock.TrieStub{}, + 0, ) assert.Nil(t, rcf) assert.Equal(t, dataRetriever.ErrNilDataPacker, err) } +func TestNewResolversContainerFactory_NilTrieDataGetterShouldErr(t *testing.T) { + t.Parallel() + + rcf, err := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", ""), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + nil, + 0, + ) + + assert.Nil(t, rcf) + assert.Equal(t, dataRetriever.ErrNilTrieDataGetter, err) +} + func TestNewResolversContainerFactory_ShouldWork(t *testing.T) { t.Parallel() @@ -216,6 +262,8 @@ func TestNewResolversContainerFactory_ShouldWork(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) assert.NotNil(t, rcf) @@ -235,6 +283,8 @@ func TestResolversContainerFactory_CreateTopicCreationTxFailsShouldErr(t *testin createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) container, err := rcf.Create() @@ -248,12 +298,14 @@ func TestResolversContainerFactory_CreateTopicCreationHdrFailsShouldErr(t *testi rcf, _ := shard.NewResolversContainerFactory( mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler(factory.HeadersTopic, ""), + createStubTopicMessageHandler(factory.ShardBlocksTopic, ""), createStore(), &mock.MarshalizerMock{}, createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) container, err := rcf.Create() @@ -273,6 +325,8 @@ func TestResolversContainerFactory_CreateTopicCreationMiniBlocksFailsShouldErr(t createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) container, err := rcf.Create() @@ -292,6 +346,8 @@ func TestResolversContainerFactory_CreateTopicCreationPeerChBlocksFailsShouldErr createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) container, err := rcf.Create() @@ -311,6 +367,8 @@ func TestResolversContainerFactory_CreateRegisterTxFailsShouldErr(t *testing.T) createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) container, err := rcf.Create() @@ -324,12 +382,14 @@ func TestResolversContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing.T) rcf, _ := shard.NewResolversContainerFactory( mock.NewOneShardCoordinatorMock(), - createStubTopicMessageHandler("", factory.HeadersTopic), + createStubTopicMessageHandler("", factory.ShardBlocksTopic), createStore(), &mock.MarshalizerMock{}, createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) container, err := rcf.Create() @@ -349,6 +409,8 @@ func TestResolversContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t *tes createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) container, err := rcf.Create() @@ -368,6 +430,29 @@ func TestResolversContainerFactory_CreateRegisterPeerChBlocksFailsShouldErr(t *t createDataPools(), 
&mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, + ) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) +} + +func TestResolversContainerFactory_CreateRegisterTrieNodesFailsShouldErr(t *testing.T) { + t.Parallel() + + rcf, _ := shard.NewResolversContainerFactory( + mock.NewOneShardCoordinatorMock(), + createStubTopicMessageHandler("", factory.TrieNodesTopic), + createStore(), + &mock.MarshalizerMock{}, + createDataPools(), + &mock.Uint64ByteSliceConverterMock{}, + &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) container, err := rcf.Create() @@ -387,6 +472,8 @@ func TestResolversContainerFactory_CreateShouldWork(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) container, err := rcf.Create() @@ -412,6 +499,8 @@ func TestResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { createDataPools(), &mock.Uint64ByteSliceConverterMock{}, &mock.DataPackerStub{}, + &mock.TrieStub{}, + 0, ) container, _ := rcf.Create() @@ -422,10 +511,10 @@ func TestResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { numResolverHeaders := 1 numResolverMiniBlocks := noOfShards + 1 numResolverPeerChanges := 1 - numResolverMetachainShardHeaders := 1 numResolverMetaBlockHeaders := 1 + numResolverTrieNodes := 2 totalResolvers := numResolverTxs + numResolverHeaders + numResolverMiniBlocks + numResolverPeerChanges + - numResolverMetachainShardHeaders + numResolverMetaBlockHeaders + numResolverSCRs + numResolverRewardTxs + numResolverMetaBlockHeaders + numResolverSCRs + numResolverRewardTxs + numResolverTrieNodes assert.Equal(t, totalResolvers, container.Len()) } diff --git a/dataRetriever/factory/txpool/txPoolFactory.go b/dataRetriever/factory/txpool/txPoolFactory.go new file mode 100644 index 00000000000..871d3cbe945 --- /dev/null +++ b/dataRetriever/factory/txpool/txPoolFactory.go @@ -0,0 +1,20 @@ +package txpool + +import ( + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" +) + +// CreateTxPool creates a new tx pool, according to the configuration +func CreateTxPool(config storageUnit.CacheConfig) (dataRetriever.ShardedDataCacherNotifier, error) { + switch config.Type { + case storageUnit.FIFOShardedCache: + return shardedData.NewShardedData(config) + case storageUnit.LRUCache: + return shardedData.NewShardedData(config) + default: + return txpool.NewShardedTxPool(config) + } +} diff --git a/dataRetriever/factory/txpool/txPoolFactory_test.go b/dataRetriever/factory/txpool/txPoolFactory_test.go new file mode 100644 index 00000000000..ffadc74a60e --- /dev/null +++ b/dataRetriever/factory/txpool/txPoolFactory_test.go @@ -0,0 +1,27 @@ +package txpool + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + "github.com/stretchr/testify/require" +) + +func Test_CreateNewTxPool_ShardedData(t *testing.T) { + config := storageUnit.CacheConfig{Type: storageUnit.FIFOShardedCache, Size: 100, Shards: 1} + txPool, err := CreateTxPool(config) + require.Nil(t, err) + require.NotNil(t, txPool) + + config = storageUnit.CacheConfig{Type: storageUnit.LRUCache, Size: 100, Shards: 1} + txPool, err = CreateTxPool(config) + require.Nil(t, err) + require.NotNil(t, txPool) +} + +func Test_CreateNewTxPool_ShardedTxPool(t *testing.T) { + config := 
storageUnit.CacheConfig{Size: 100, Shards: 1} + txPool, err := CreateTxPool(config) + require.Nil(t, err) + require.NotNil(t, txPool) +} diff --git a/dataRetriever/interface.go b/dataRetriever/interface.go index 1fbec036bf8..9e257effd3a 100644 --- a/dataRetriever/interface.go +++ b/dataRetriever/interface.go @@ -35,10 +35,12 @@ const ( MetaHdrNonceHashDataUnit UnitType = 9 // HeartbeatUnit is the heartbeat storage unit identifier HeartbeatUnit UnitType = 10 + // MiniBlockHeaderUnit is the miniblock header data unit identifier + MiniBlockHeaderUnit UnitType = 11 // BootstrapUnit is the bootstrap storage unit identifier - BootstrapUnit UnitType = 11 + BootstrapUnit UnitType = 12 //StatusMetricsUnit is the status metrics storage unit identifier - StatusMetricsUnit UnitType = 12 + StatusMetricsUnit UnitType = 13 // ShardHdrNonceHashDataUnit is the header nonce-hash pair data unit identifier //TODO: Add only unit types lower than 100 @@ -59,6 +61,8 @@ type Resolver interface { type HeaderResolver interface { Resolver RequestDataFromNonce(nonce uint64) error + RequestDataFromEpoch(identifier []byte) error + SetEpochHandler(epochHandler EpochHandler) error } // MiniBlocksResolver defines what a mini blocks resolver should do @@ -103,6 +107,12 @@ type ResolversContainerFactory interface { IsInterfaceNil() bool } +// EpochHandler defines the functionality to get the current epoch +type EpochHandler interface { + Epoch() uint32 + IsInterfaceNil() bool +} + // MessageHandler defines the functionality needed by structs to send data to other peers type MessageHandler interface { ConnectedPeersOnTopic(topic string) []p2p.PeerID @@ -176,7 +186,6 @@ type ShardedDataCacherNotifier interface { RemoveSetOfDataFromPool(keys [][]byte, cacheId string) RemoveDataFromAllShards(key []byte) MergeShardStores(sourceCacheID, destCacheID string) - MoveData(sourceCacheID, destCacheID string, key [][]byte) Clear() ClearShardStore(cacheId string) CreateShardStore(cacheId string) @@ -191,14 +200,18 @@ type ShardIdHashMap interface { IsInterfaceNil() bool } -// Uint64SyncMapCacher defines a cacher-type struct that uses uint64 keys and sync-maps values -type Uint64SyncMapCacher interface { +// HeadersPool defines what a headers pool structure can perform +type HeadersPool interface { Clear() - Get(nonce uint64) (ShardIdHashMap, bool) - Merge(nonce uint64, src ShardIdHashMap) - Remove(nonce uint64, shardId uint32) - RegisterHandler(handler func(nonce uint64, shardId uint32, value []byte)) - Has(nonce uint64, shardId uint32) bool + AddHeader(headerHash []byte, header data.HeaderHandler) + RemoveHeaderByHash(headerHash []byte) + RemoveHeaderByNonceAndShardId(headerNonce uint64, shardId uint32) + GetHeadersByNonceAndShardId(headerNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) + GetHeaderByHash(hash []byte) (data.HeaderHandler, error) + RegisterHandler(handler func(headerHandler data.HeaderHandler, headerHash []byte)) + Nonces(shardId uint32) []uint64 + Len() int + MaxSize() int IsInterfaceNil() bool } @@ -215,21 +228,19 @@ type PoolsHolder interface { Transactions() ShardedDataCacherNotifier UnsignedTransactions() ShardedDataCacherNotifier RewardTransactions() ShardedDataCacherNotifier - Headers() storage.Cacher - HeadersNonces() Uint64SyncMapCacher + Headers() HeadersPool MiniBlocks() storage.Cacher PeerChangesBlocks() storage.Cacher - MetaBlocks() storage.Cacher + TrieNodes() storage.Cacher CurrentBlockTxs() TransactionCacher IsInterfaceNil() bool } // MetaPoolsHolder defines getter for data pools for 
metachain type MetaPoolsHolder interface { - MetaBlocks() storage.Cacher MiniBlocks() storage.Cacher - ShardHeaders() storage.Cacher - HeadersNonces() Uint64SyncMapCacher + Headers() HeadersPool + TrieNodes() storage.Cacher Transactions() ShardedDataCacherNotifier UnsignedTransactions() ShardedDataCacherNotifier CurrentBlockTxs() TransactionCacher @@ -263,6 +274,12 @@ type DataPacker interface { IsInterfaceNil() bool } +// TrieDataGetter returns requested data from the trie +type TrieDataGetter interface { + GetSerializedNodes([]byte, uint64) ([][]byte, error) + IsInterfaceNil() bool +} + // RequestedItemsHandler can determine if a certain key has or not been requested type RequestedItemsHandler interface { Add(key string) error diff --git a/dataRetriever/mock/hasherMock.go b/dataRetriever/mock/hasherMock.go index 383b8f49977..17b88ebcbaa 100644 --- a/dataRetriever/mock/hasherMock.go +++ b/dataRetriever/mock/hasherMock.go @@ -11,7 +11,7 @@ type HasherMock struct { // Compute will output the SHA's equivalent of the input string func (sha HasherMock) Compute(s string) []byte { h := sha256.New() - h.Write([]byte(s)) + _, _ = h.Write([]byte(s)) return h.Sum(nil) } @@ -29,9 +29,6 @@ func (HasherMock) Size() int { } // IsInterfaceNil returns true if there is no value under the interface -func (sha *HasherMock) IsInterfaceNil() bool { - if sha == nil { - return true - } +func (sha HasherMock) IsInterfaceNil() bool { return false } diff --git a/dataRetriever/mock/headerResolverStub.go b/dataRetriever/mock/headerResolverStub.go index 0bfe959d45e..d2bb9ccd2e0 100644 --- a/dataRetriever/mock/headerResolverStub.go +++ b/dataRetriever/mock/headerResolverStub.go @@ -1,6 +1,7 @@ package mock import ( + "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/pkg/errors" ) @@ -11,6 +12,22 @@ type HeaderResolverStub struct { RequestDataFromHashCalled func(hash []byte) error ProcessReceivedMessageCalled func(message p2p.MessageP2P) error RequestDataFromNonceCalled func(nonce uint64) error + RequestDataFromEpochCalled func(identifier []byte) error + SetEpochHandlerCalled func(epochHandler dataRetriever.EpochHandler) error +} + +func (hrs *HeaderResolverStub) RequestDataFromEpoch(identifier []byte) error { + if hrs.RequestDataFromEpochCalled != nil { + return hrs.RequestDataFromEpochCalled(identifier) + } + return nil +} + +func (hrs *HeaderResolverStub) SetEpochHandler(epochHandler dataRetriever.EpochHandler) error { + if hrs.SetEpochHandlerCalled != nil { + return hrs.SetEpochHandlerCalled(epochHandler) + } + return nil } func (hrs *HeaderResolverStub) RequestDataFromHash(hash []byte) error { diff --git a/dataRetriever/mock/headersCacherStub.go b/dataRetriever/mock/headersCacherStub.go new file mode 100644 index 00000000000..b5ad942608e --- /dev/null +++ b/dataRetriever/mock/headersCacherStub.go @@ -0,0 +1,79 @@ +package mock + +import "github.com/ElrondNetwork/elrond-go/data" + +type HeadersCacherStub struct { + AddCalled func(headerHash []byte, header data.HeaderHandler) + RemoveHeaderByHashCalled func(headerHash []byte) + RemoveHeaderByNonceAndShardIdCalled func(hdrNonce uint64, shardId uint32) + GetHeaderByNonceAndShardIdCalled func(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) + GetHeaderByHashCalled func(hash []byte) (data.HeaderHandler, error) + ClearCalled func() + RegisterHandlerCalled func(handler func(header data.HeaderHandler, shardHeaderHash []byte)) + NoncesCalled func(shardId uint32) []uint64 + LenCalled func() int + 
MaxSizeCalled func() int +} + +func (hcs *HeadersCacherStub) AddHeader(headerHash []byte, header data.HeaderHandler) { + if hcs.AddCalled != nil { + hcs.AddCalled(headerHash, header) + } +} + +func (hcs *HeadersCacherStub) RemoveHeaderByHash(headerHash []byte) { + if hcs.RemoveHeaderByHashCalled != nil { + hcs.RemoveHeaderByHashCalled(headerHash) + } +} + +func (hcs *HeadersCacherStub) RemoveHeaderByNonceAndShardId(hdrNonce uint64, shardId uint32) { + if hcs.RemoveHeaderByNonceAndShardIdCalled != nil { + hcs.RemoveHeaderByNonceAndShardIdCalled(hdrNonce, shardId) + } +} + +func (hcs *HeadersCacherStub) GetHeadersByNonceAndShardId(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) { + if hcs.GetHeaderByNonceAndShardIdCalled != nil { + return hcs.GetHeaderByNonceAndShardIdCalled(hdrNonce, shardId) + } + return nil, nil, nil +} + +func (hcs *HeadersCacherStub) GetHeaderByHash(hash []byte) (data.HeaderHandler, error) { + if hcs.GetHeaderByHashCalled != nil { + return hcs.GetHeaderByHashCalled(hash) + } + return nil, nil +} + +func (hcs *HeadersCacherStub) Clear() { + if hcs.ClearCalled != nil { + hcs.ClearCalled() + } +} + +func (hcs *HeadersCacherStub) RegisterHandler(handler func(header data.HeaderHandler, shardHeaderHash []byte)) { + if hcs.RegisterHandlerCalled != nil { + hcs.RegisterHandlerCalled(handler) + } +} + +func (hcs *HeadersCacherStub) Nonces(shardId uint32) []uint64 { + if hcs.NoncesCalled != nil { + return hcs.NoncesCalled(shardId) + } + return nil +} + +func (hcs *HeadersCacherStub) Len() int { + return 0 +} + +func (hcs *HeadersCacherStub) MaxSize() int { + return 100 +} + +func (hcs *HeadersCacherStub) IsInterfaceNil() bool { + return hcs == nil +} diff --git a/dataRetriever/mock/metaPoolsHolderStub.go b/dataRetriever/mock/metaPoolsHolderStub.go index 41d87fe259f..8bbf91b711f 100644 --- a/dataRetriever/mock/metaPoolsHolderStub.go +++ b/dataRetriever/mock/metaPoolsHolderStub.go @@ -6,10 +6,9 @@ import ( ) type MetaPoolsHolderStub struct { - MetaBlocksCalled func() storage.Cacher MiniBlocksCalled func() storage.Cacher - ShardHeadersCalled func() storage.Cacher - HeadersNoncesCalled func() dataRetriever.Uint64SyncMapCacher + TrieNodesCalled func() storage.Cacher + HeadersCalled func() dataRetriever.HeadersPool TransactionsCalled func() dataRetriever.ShardedDataCacherNotifier UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier CurrBlockTxsCalled func() dataRetriever.TransactionCacher @@ -27,20 +26,16 @@ func (mphs *MetaPoolsHolderStub) UnsignedTransactions() dataRetriever.ShardedDat return mphs.UnsignedTransactionsCalled() } -func (mphs *MetaPoolsHolderStub) MetaBlocks() storage.Cacher { - return mphs.MetaBlocksCalled() -} - func (mphs *MetaPoolsHolderStub) MiniBlocks() storage.Cacher { return mphs.MiniBlocksCalled() } -func (mphs *MetaPoolsHolderStub) ShardHeaders() storage.Cacher { - return mphs.ShardHeadersCalled() +func (mphs *MetaPoolsHolderStub) Headers() dataRetriever.HeadersPool { + return mphs.HeadersCalled() } -func (mphs *MetaPoolsHolderStub) HeadersNonces() dataRetriever.Uint64SyncMapCacher { - return mphs.HeadersNoncesCalled() +func (mphs *MetaPoolsHolderStub) TrieNodes() storage.Cacher { + return mphs.TrieNodesCalled() } // IsInterfaceNil returns true if there is no value under the interface diff --git a/dataRetriever/mock/poolsHolderStub.go b/dataRetriever/mock/poolsHolderStub.go index 35a1d5e92db..1d1d4ff05d0 100644 --- a/dataRetriever/mock/poolsHolderStub.go +++ b/dataRetriever/mock/poolsHolderStub.go @@ -6,14 +6,13 @@ 
import ( ) type PoolsHolderStub struct { - HeadersCalled func() storage.Cacher - HeadersNoncesCalled func() dataRetriever.Uint64SyncMapCacher + HeadersCalled func() dataRetriever.HeadersPool PeerChangesBlocksCalled func() storage.Cacher TransactionsCalled func() dataRetriever.ShardedDataCacherNotifier UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier RewardTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier MiniBlocksCalled func() storage.Cacher - MetaBlocksCalled func() storage.Cacher + TrieNodesCalled func() storage.Cacher CurrBlockTxsCalled func() dataRetriever.TransactionCacher } @@ -21,14 +20,10 @@ func (phs *PoolsHolderStub) CurrentBlockTxs() dataRetriever.TransactionCacher { return phs.CurrBlockTxsCalled() } -func (phs *PoolsHolderStub) Headers() storage.Cacher { +func (phs *PoolsHolderStub) Headers() dataRetriever.HeadersPool { return phs.HeadersCalled() } -func (phs *PoolsHolderStub) HeadersNonces() dataRetriever.Uint64SyncMapCacher { - return phs.HeadersNoncesCalled() -} - func (phs *PoolsHolderStub) PeerChangesBlocks() storage.Cacher { return phs.PeerChangesBlocksCalled() } @@ -41,10 +36,6 @@ func (phs *PoolsHolderStub) MiniBlocks() storage.Cacher { return phs.MiniBlocksCalled() } -func (phs *PoolsHolderStub) MetaBlocks() storage.Cacher { - return phs.MetaBlocksCalled() -} - func (phs *PoolsHolderStub) UnsignedTransactions() dataRetriever.ShardedDataCacherNotifier { return phs.UnsignedTransactionsCalled() } @@ -53,6 +44,10 @@ func (phs *PoolsHolderStub) RewardTransactions() dataRetriever.ShardedDataCacher return phs.RewardTransactionsCalled() } +func (phs *PoolsHolderStub) TrieNodes() storage.Cacher { + return phs.TrieNodesCalled() +} + // IsInterfaceNil returns true if there is no value under the interface func (phs *PoolsHolderStub) IsInterfaceNil() bool { if phs == nil { diff --git a/dataRetriever/mock/shardedDataStub.go b/dataRetriever/mock/shardedDataStub.go index 688a94904dd..3fa0868838e 100644 --- a/dataRetriever/mock/shardedDataStub.go +++ b/dataRetriever/mock/shardedDataStub.go @@ -47,10 +47,6 @@ func (sd *ShardedDataStub) MergeShardStores(sourceCacheId, destCacheId string) { sd.MergeShardStoresCalled(sourceCacheId, destCacheId) } -func (sd *ShardedDataStub) MoveData(sourceCacheId, destCacheId string, key [][]byte) { - sd.MoveDataCalled(sourceCacheId, destCacheId, key) -} - func (sd *ShardedDataStub) Clear() { sd.ClearCalled() } diff --git a/dataRetriever/mock/trieStub.go b/dataRetriever/mock/trieStub.go new file mode 100644 index 00000000000..840e318a244 --- /dev/null +++ b/dataRetriever/mock/trieStub.go @@ -0,0 +1,162 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" +) + +type TrieStub struct { + GetCalled func(key []byte) ([]byte, error) + UpdateCalled func(key, value []byte) error + DeleteCalled func(key []byte) error + RootCalled func() ([]byte, error) + ProveCalled func(key []byte) ([][]byte, error) + VerifyProofCalled func(proofs [][]byte, key []byte) (bool, error) + CommitCalled func() error + RecreateCalled func(root []byte) (data.Trie, error) + DeepCloneCalled func() (data.Trie, error) + CancelPruneCalled func(rootHash []byte, identifier data.TriePruningIdentifier) + PruneCalled func(rootHash []byte, identifier data.TriePruningIdentifier) error + ResetOldHashesCalled func() [][]byte + AppendToOldHashesCalled func([][]byte) + TakeSnapshotCalled func() error + SetCheckpointCalled func() error + GetSerializedNodesCalled func([]byte, uint64) ([][]byte, error) + DatabaseCalled func() data.DBWriteCacher 
+} + +func (ts *TrieStub) Get(key []byte) ([]byte, error) { + if ts.GetCalled != nil { + return ts.GetCalled(key) + } + + return nil, errNotImplemented +} + +func (ts *TrieStub) Update(key, value []byte) error { + if ts.UpdateCalled != nil { + return ts.UpdateCalled(key, value) + } + + return errNotImplemented +} + +func (ts *TrieStub) Delete(key []byte) error { + if ts.DeleteCalled != nil { + return ts.DeleteCalled(key) + } + + return errNotImplemented +} + +func (ts *TrieStub) Root() ([]byte, error) { + if ts.RootCalled != nil { + return ts.RootCalled() + } + + return nil, errNotImplemented +} + +func (ts *TrieStub) Prove(key []byte) ([][]byte, error) { + if ts.ProveCalled != nil { + return ts.ProveCalled(key) + } + + return nil, errNotImplemented +} + +func (ts *TrieStub) VerifyProof(proofs [][]byte, key []byte) (bool, error) { + if ts.VerifyProofCalled != nil { + return ts.VerifyProofCalled(proofs, key) + } + + return false, errNotImplemented +} + +func (ts *TrieStub) Commit() error { + if ts != nil { + return ts.CommitCalled() + } + + return errNotImplemented +} + +func (ts *TrieStub) Recreate(root []byte) (data.Trie, error) { + if ts.RecreateCalled != nil { + return ts.RecreateCalled(root) + } + + return nil, errNotImplemented +} + +func (ts *TrieStub) String() string { + return "stub trie" +} + +func (ts *TrieStub) DeepClone() (data.Trie, error) { + return ts.DeepCloneCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ts *TrieStub) IsInterfaceNil() bool { + return ts == nil +} + +// CancelPrune invalidates the hashes that correspond to the given root hash from the eviction waiting list +func (ts *TrieStub) CancelPrune(rootHash []byte, identifier data.TriePruningIdentifier) { + if ts.CancelPruneCalled != nil { + ts.CancelPruneCalled(rootHash, identifier) + } +} + +// Prune removes from the database all the old hashes that correspond to the given root hash +func (ts *TrieStub) Prune(rootHash []byte, identifier data.TriePruningIdentifier) error { + if ts.PruneCalled != nil { + return ts.PruneCalled(rootHash, identifier) + } + + return errNotImplemented +} + +// ResetOldHashes resets the oldHashes and oldRoot variables and returns the old hashes +func (ts *TrieStub) ResetOldHashes() [][]byte { + if ts.ResetOldHashesCalled != nil { + return ts.ResetOldHashesCalled() + } + + return nil +} + +// AppendToOldHashes appends the given hashes to the trie's oldHashes variable +func (ts *TrieStub) AppendToOldHashes(hashes [][]byte) { + if ts.AppendToOldHashesCalled != nil { + ts.AppendToOldHashesCalled(hashes) + } +} + +func (ts *TrieStub) TakeSnapshot() error { + if ts.TakeSnapshotCalled != nil { + return ts.TakeSnapshotCalled() + } + return nil +} + +func (ts *TrieStub) SetCheckpoint() error { + if ts.SetCheckpointCalled != nil { + return ts.SetCheckpointCalled() + } + return nil +} + +func (ts *TrieStub) GetSerializedNodes(hash []byte, maxBuffToSend uint64) ([][]byte, error) { + if ts.GetSerializedNodesCalled != nil { + return ts.GetSerializedNodesCalled(hash, maxBuffToSend) + } + return nil, nil +} + +func (ts *TrieStub) Database() data.DBWriteCacher { + if ts.DatabaseCalled != nil { + return ts.DatabaseCalled() + } + return nil +} diff --git a/dataRetriever/requestData.go b/dataRetriever/requestData.go index 0972ce6efea..2a0b530f5b6 100644 --- a/dataRetriever/requestData.go +++ b/dataRetriever/requestData.go @@ -30,6 +30,8 @@ const ( HashArrayType // NonceType indicates that the request data object is of type nonce (uint64) NonceType + // EpochType 
indicates that the request data object is of type epoch + EpochType ) // RequestData holds the requested data diff --git a/dataRetriever/requestHandlers/requestHandler.go b/dataRetriever/requestHandlers/requestHandler.go index c5a75b27410..df0a6e53e4f 100644 --- a/dataRetriever/requestHandlers/requestHandler.go +++ b/dataRetriever/requestHandlers/requestHandler.go @@ -4,23 +4,19 @@ import ( "fmt" "time" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/core/partitioning" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/logger" + "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/sharding" ) type resolverRequestHandler struct { resolversFinder dataRetriever.ResolversFinder requestedItemsHandler dataRetriever.RequestedItemsHandler - txRequestTopic string - scrRequestTopic string - rewardTxRequestTopic string - mbRequestTopic string - shardHdrRequestTopic string - metaHdrRequestTopic string - isMetaChain bool + shardID uint32 maxTxsToRequest int sweepTime time.Time } @@ -31,13 +27,8 @@ var log = logger.GetOrCreate("dataretriever/requesthandlers") func NewShardResolverRequestHandler( finder dataRetriever.ResolversFinder, requestedItemsHandler dataRetriever.RequestedItemsHandler, - txRequestTopic string, - scrRequestTopic string, - rewardTxRequestTopic string, - mbRequestTopic string, - shardHdrRequestTopic string, - metaHdrRequestTopic string, maxTxsToRequest int, + shardID uint32, ) (*resolverRequestHandler, error) { if check.IfNil(finder) { @@ -46,24 +37,6 @@ func NewShardResolverRequestHandler( if check.IfNil(requestedItemsHandler) { return nil, dataRetriever.ErrNilRequestedItemsHandler } - if len(txRequestTopic) == 0 { - return nil, dataRetriever.ErrEmptyTxRequestTopic - } - if len(scrRequestTopic) == 0 { - return nil, dataRetriever.ErrEmptyScrRequestTopic - } - if len(rewardTxRequestTopic) == 0 { - return nil, dataRetriever.ErrEmptyRewardTxRequestTopic - } - if len(mbRequestTopic) == 0 { - return nil, dataRetriever.ErrEmptyMiniBlockRequestTopic - } - if len(shardHdrRequestTopic) == 0 { - return nil, dataRetriever.ErrEmptyShardHeaderRequestTopic - } - if len(metaHdrRequestTopic) == 0 { - return nil, dataRetriever.ErrEmptyMetaHeaderRequestTopic - } if maxTxsToRequest < 1 { return nil, dataRetriever.ErrInvalidMaxTxRequest } @@ -71,13 +44,7 @@ func NewShardResolverRequestHandler( rrh := &resolverRequestHandler{ resolversFinder: finder, requestedItemsHandler: requestedItemsHandler, - txRequestTopic: txRequestTopic, - mbRequestTopic: mbRequestTopic, - shardHdrRequestTopic: shardHdrRequestTopic, - metaHdrRequestTopic: metaHdrRequestTopic, - scrRequestTopic: scrRequestTopic, - rewardTxRequestTopic: rewardTxRequestTopic, - isMetaChain: false, + shardID: shardID, maxTxsToRequest: maxTxsToRequest, } @@ -90,11 +57,6 @@ func NewShardResolverRequestHandler( func NewMetaResolverRequestHandler( finder dataRetriever.ResolversFinder, requestedItemsHandler dataRetriever.RequestedItemsHandler, - shardHdrRequestTopic string, - metaHdrRequestTopic string, - txRequestTopic string, - scrRequestTopic string, - mbRequestTopic string, maxTxsToRequest int, ) (*resolverRequestHandler, error) { @@ -104,21 +66,6 @@ func NewMetaResolverRequestHandler( if check.IfNil(requestedItemsHandler) { return nil, dataRetriever.ErrNilRequestedItemsHandler } - if len(shardHdrRequestTopic) == 0 { - return nil, dataRetriever.ErrEmptyShardHeaderRequestTopic - } - if 
len(metaHdrRequestTopic) == 0 { - return nil, dataRetriever.ErrEmptyMetaHeaderRequestTopic - } - if len(txRequestTopic) == 0 { - return nil, dataRetriever.ErrEmptyTxRequestTopic - } - if len(scrRequestTopic) == 0 { - return nil, dataRetriever.ErrEmptyScrRequestTopic - } - if len(mbRequestTopic) == 0 { - return nil, dataRetriever.ErrEmptyMiniBlockRequestTopic - } if maxTxsToRequest < 1 { return nil, dataRetriever.ErrInvalidMaxTxRequest } @@ -126,12 +73,7 @@ func NewMetaResolverRequestHandler( rrh := &resolverRequestHandler{ resolversFinder: finder, requestedItemsHandler: requestedItemsHandler, - shardHdrRequestTopic: shardHdrRequestTopic, - metaHdrRequestTopic: metaHdrRequestTopic, - txRequestTopic: txRequestTopic, - mbRequestTopic: mbRequestTopic, - scrRequestTopic: scrRequestTopic, - isMetaChain: true, + shardID: sharding.MetachainShardId, maxTxsToRequest: maxTxsToRequest, } @@ -140,7 +82,7 @@ func NewMetaResolverRequestHandler( // RequestTransaction method asks for transactions from the connected peers func (rrh *resolverRequestHandler) RequestTransaction(destShardID uint32, txHashes [][]byte) { - rrh.requestByHashes(destShardID, txHashes, rrh.txRequestTopic) + rrh.requestByHashes(destShardID, txHashes, factory.TransactionTopic) } func (rrh *resolverRequestHandler) requestByHashes(destShardID uint32, hashes [][]byte, topic string) { @@ -161,7 +103,7 @@ func (rrh *resolverRequestHandler) requestByHashes(destShardID uint32, hashes [] txResolver, ok := resolver.(HashSliceResolver) if !ok { - log.Debug("wrong assertion type when creating transaction resolver") + log.Warn("wrong assertion type when creating transaction resolver") return } @@ -184,34 +126,30 @@ func (rrh *resolverRequestHandler) requestByHashes(destShardID uint32, hashes [] // RequestUnsignedTransactions method asks for unsigned transactions from the connected peers func (rrh *resolverRequestHandler) RequestUnsignedTransactions(destShardID uint32, scrHashes [][]byte) { - rrh.requestByHashes(destShardID, scrHashes, rrh.scrRequestTopic) + rrh.requestByHashes(destShardID, scrHashes, factory.UnsignedTransactionTopic) } // RequestRewardTransactions requests for reward transactions from the connected peers func (rrh *resolverRequestHandler) RequestRewardTransactions(destShardId uint32, rewardTxHashes [][]byte) { - rrh.requestByHashes(destShardId, rewardTxHashes, rrh.rewardTxRequestTopic) + rrh.requestByHashes(destShardId, rewardTxHashes, factory.RewardsTransactionTopic) } // RequestMiniBlock method asks for miniblocks from the connected peers func (rrh *resolverRequestHandler) RequestMiniBlock(destShardID uint32, miniblockHash []byte) { - rrh.sweepIfNeeded() - - if rrh.requestedItemsHandler.Has(string(miniblockHash)) { - log.Trace("item already requested", - "key", miniblockHash) + if !rrh.testIfRequestIsNeeded(miniblockHash) { return } log.Trace("requesting miniblock from network", "hash", miniblockHash, "shard", destShardID, - "topic", rrh.mbRequestTopic, + "topic", factory.MiniBlocksTopic, ) - resolver, err := rrh.resolversFinder.CrossShardResolver(rrh.mbRequestTopic, destShardID) + resolver, err := rrh.resolversFinder.CrossShardResolver(factory.MiniBlocksTopic, destShardID) if err != nil { log.Error("missing resolver", - "topic", rrh.mbRequestTopic, + "topic", factory.MiniBlocksTopic, "shard", destShardID, ) return @@ -223,37 +161,86 @@ func (rrh *resolverRequestHandler) RequestMiniBlock(destShardID uint32, minibloc return } - err = rrh.requestedItemsHandler.Add(string(miniblockHash)) + rrh.addRequestedItem(miniblockHash) +} + 
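// Illustrative sketch, not part of the patch: with the topic strings removed from the
// constructors, wiring a request handler now needs only the resolvers finder, the
// requested-items handler, the max tx batch size and the shard ID; topics are taken
// from the process/factory constants. Assuming `finder` and `requestedItems` are
// already built (package name inferred from the directory):
//
//	shardHandler, err := requestHandlers.NewShardResolverRequestHandler(
//		finder,         // dataRetriever.ResolversFinder
//		requestedItems, // dataRetriever.RequestedItemsHandler
//		100,            // maxTxsToRequest
//		0,              // shard ID of the current node
//	)
//	if err != nil {
//		return err
//	}
//	shardHandler.RequestShardHeaderByNonce(0, 5) // resolved internally on factory.ShardBlocksTopic
//
// The metachain variant, NewMetaResolverRequestHandler(finder, requestedItems, 100),
// sets the shard ID to sharding.MetachainShardId internally.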
+// RequestShardHeader method asks for shard header from the connected peers +func (rrh *resolverRequestHandler) RequestShardHeader(shardId uint32, hash []byte) { + if !rrh.testIfRequestIsNeeded(hash) { + return + } + + headerResolver, err := rrh.getShardHeaderResolver(shardId) if err != nil { - log.Trace("add requested item with error", + log.Error("getShardHeaderResolver", "error", err.Error(), - "key", miniblockHash) + ) + return + } + + err = headerResolver.RequestDataFromHash(hash) + if err != nil { + log.Debug("RequestShardHeader", "error", err.Error()) + return } + + rrh.addRequestedItem(hash) } -// RequestHeader method asks for header from the connected peers -func (rrh *resolverRequestHandler) RequestHeader(destShardID uint32, hash []byte) { - rrh.sweepIfNeeded() +// RequestMetaHeader method asks for meta header from the connected peers +func (rrh *resolverRequestHandler) RequestMetaHeader(hash []byte) { + if !rrh.testIfRequestIsNeeded(hash) { + return + } - if rrh.requestedItemsHandler.Has(string(hash)) { - log.Trace("item already requested", - "key", hash) + resolver, err := rrh.getMetaHeaderResolver() + if err != nil { + log.Error("RequestMetaHeader", + "error", err.Error(), + ) return } - //TODO: Refactor this class and create specific methods for requesting shard or meta data - var baseTopic string - if destShardID == sharding.MetachainShardId { - baseTopic = rrh.metaHdrRequestTopic - } else { - baseTopic = rrh.shardHdrRequestTopic + err = resolver.RequestDataFromHash(hash) + if err != nil { + log.Debug("RequestMetaHeader, RequestDataFromHash", "error", err.Error()) + return } - log.Trace("requesting by hash", - "topic", baseTopic, - "shard", destShardID, - "hash", hash, - ) + rrh.addRequestedItem(hash) +} + +// RequestShardHeaderByNonce method asks for shard header from the connected peers by nonce +func (rrh *resolverRequestHandler) RequestShardHeaderByNonce(shardId uint32, nonce uint64) { + key := []byte(fmt.Sprintf("%d-%d", shardId, nonce)) + if !rrh.testIfRequestIsNeeded(key) { + return + } + + headerResolver, err := rrh.getShardHeaderResolver(shardId) + if err != nil { + log.Error("getShardHeaderResolver", + "error", err.Error(), + ) + return + } + + err = headerResolver.RequestDataFromNonce(nonce) + if err != nil { + log.Debug("RequestShardHeaderByNonce", "error", err.Error()) + return + } + + rrh.addRequestedItem(key) +} + +// RequestTrieNodes method asks for trie nodes from the connected peers +func (rrh *resolverRequestHandler) RequestTrieNodes(shardId uint32, hash []byte) { + rrh.requestByHash(shardId, hash, factory.TrieNodesTopic) +} + +func (rrh *resolverRequestHandler) requestByHash(destShardID uint32, hash []byte, baseTopic string) { + log.Debug(fmt.Sprintf("Requesting %s from shard %d with hash %s from network\n", baseTopic, destShardID, core.ToB64(hash))) var resolver dataRetriever.Resolver var err error @@ -265,53 +252,133 @@ func (rrh *resolverRequestHandler) RequestHeader(destShardID uint32, hash []byte } if err != nil { - log.Error("missing resolver", - "topic", baseTopic, - "shard", destShardID, - ) + log.Error(fmt.Sprintf("missing resolver to %s topic to shard %d", baseTopic, destShardID)) return } err = resolver.RequestDataFromHash(hash) if err != nil { - log.Debug("RequestDataFromHash", "error", err.Error()) + log.Debug(err.Error()) + } +} + +// RequestMetaHeaderByNonce method asks for meta header from the connected peers by nonce +func (rrh *resolverRequestHandler) RequestMetaHeaderByNonce(nonce uint64) { + key := []byte(fmt.Sprintf("%d-%d", 
sharding.MetachainShardId, nonce)) + if !rrh.testIfRequestIsNeeded(key) { return } - err = rrh.requestedItemsHandler.Add(string(hash)) + headerResolver, err := rrh.getMetaHeaderResolver() if err != nil { - log.Trace("add requested item with error", + log.Error("getMetaHeaderResolver", "error", err.Error(), - "key", hash) + ) + return } + + err = headerResolver.RequestDataFromNonce(nonce) + if err != nil { + log.Debug("RequestMetaHeaderByNonce", "error", err.Error()) + return + } + + rrh.addRequestedItem(key) } -// RequestHeaderByNonce method asks for transactions from the connected peers -func (rrh *resolverRequestHandler) RequestHeaderByNonce(destShardID uint32, nonce uint64) { +func (rrh *resolverRequestHandler) testIfRequestIsNeeded(key []byte) bool { rrh.sweepIfNeeded() - key := fmt.Sprintf("%d-%d", destShardID, nonce) - if rrh.requestedItemsHandler.Has(key) { + if rrh.requestedItemsHandler.Has(string(key)) { log.Trace("item already requested", "key", key) - return + return false } - var err error - var resolver dataRetriever.Resolver - var topic string - if rrh.isMetaChain { - topic = rrh.shardHdrRequestTopic - resolver, err = rrh.resolversFinder.CrossShardResolver(topic, destShardID) - } else { - topic = rrh.metaHdrRequestTopic - resolver, err = rrh.resolversFinder.MetaChainResolver(topic) + return true +} + +func (rrh *resolverRequestHandler) addRequestedItem(key []byte) { + err := rrh.requestedItemsHandler.Add(string(key)) + if err != nil { + log.Debug("add requested item with error", + "error", err.Error(), + "key", key) + } +} + +func (rrh *resolverRequestHandler) getShardHeaderResolver(shardId uint32) (dataRetriever.HeaderResolver, error) { + isMetachainNode := rrh.shardID == sharding.MetachainShardId + shardIdMissmatch := rrh.shardID != shardId + requestOnMetachain := shardId == sharding.MetachainShardId + isRequestInvalid := (!isMetachainNode && shardIdMissmatch) || requestOnMetachain + if isRequestInvalid { + return nil, dataRetriever.ErrBadRequest + } + + //requests should be done on the topic shardBlocks_0_META so that is why we need to figure out + //the cross shard id + crossShardId := sharding.MetachainShardId + if isMetachainNode { + crossShardId = shardId } + resolver, err := rrh.resolversFinder.CrossShardResolver(factory.ShardBlocksTopic, crossShardId) if err != nil { - log.Debug("missing resolver", - "topic", topic, - "shard", destShardID, + err = fmt.Errorf("%w, topic: %s, current shard ID: %d, cross shard ID: %d", + err, factory.ShardBlocksTopic, rrh.shardID, crossShardId) + return nil, err + } + + headerResolver, ok := resolver.(dataRetriever.HeaderResolver) + if !ok { + err = fmt.Errorf("%w, topic: %s, current shard ID: %d, cross shard ID: %d, expected HeaderResolver", + dataRetriever.ErrWrongTypeInContainer, factory.ShardBlocksTopic, rrh.shardID, crossShardId) + return nil, err + } + + return headerResolver, nil +} + +func (rrh *resolverRequestHandler) getMetaHeaderResolver() (dataRetriever.HeaderResolver, error) { + resolver, err := rrh.resolversFinder.MetaChainResolver(factory.MetachainBlocksTopic) + if err != nil { + err = fmt.Errorf("%w, topic: %s, current shard ID: %d", + err, factory.MetachainBlocksTopic, rrh.shardID) + return nil, err + } + + headerResolver, ok := resolver.(dataRetriever.HeaderResolver) + if !ok { + err = fmt.Errorf("%w, topic: %s, current shard ID: %d, expected HeaderResolver", + dataRetriever.ErrWrongTypeInContainer, factory.ShardBlocksTopic, rrh.shardID) + return nil, err + } + + return headerResolver, nil +} + +// 
RequestStartOfEpochMetaBlock method asks for the start of epoch metablock from the connected peers +func (rrh *resolverRequestHandler) RequestStartOfEpochMetaBlock(epoch uint32) { + rrh.sweepIfNeeded() + + epochStartIdentifier := core.EpochStartIdentifier(epoch) + if rrh.requestedItemsHandler.Has(epochStartIdentifier) { + log.Trace("item already requested", + "key", epochStartIdentifier) + return + } + + baseTopic := factory.MetachainBlocksTopic + log.Trace("requesting header by epoch", + "topic", baseTopic, + "hash", epochStartIdentifier, + ) + + resolver, err := rrh.resolversFinder.MetaChainResolver(baseTopic) + if err != nil { + log.Error("missing resolver", + "topic", baseTopic, ) return } @@ -319,32 +386,28 @@ func (rrh *resolverRequestHandler) RequestHeaderByNonce(destShardID uint32, nonc headerResolver, ok := resolver.(dataRetriever.HeaderResolver) if !ok { log.Debug("resolver is not a header resolver", - "topic", topic, - "shard", destShardID, + "topic", baseTopic, ) return } - err = headerResolver.RequestDataFromNonce(nonce) + err = headerResolver.RequestDataFromEpoch([]byte(epochStartIdentifier)) if err != nil { - log.Debug("RequestDataFromNonce", "error", err.Error()) + log.Debug("RequestDataFromEpoch", "error", err.Error()) return } - err = rrh.requestedItemsHandler.Add(key) + err = rrh.requestedItemsHandler.Add(epochStartIdentifier) if err != nil { log.Trace("add requested item with error", "error", err.Error(), - "key", key) + "key", epochStartIdentifier) } } // IsInterfaceNil returns true if there is no value under the interface func (rrh *resolverRequestHandler) IsInterfaceNil() bool { - if rrh == nil { - return true - } - return false + return rrh == nil } func (rrh *resolverRequestHandler) getUnrequestedHashes(hashes [][]byte) [][]byte { diff --git a/dataRetriever/requestHandlers/requestHandler_test.go b/dataRetriever/requestHandlers/requestHandler_test.go index 6821f39bdb2..4042e7ff953 100644 --- a/dataRetriever/requestHandlers/requestHandler_test.go +++ b/dataRetriever/requestHandlers/requestHandler_test.go @@ -12,17 +12,31 @@ import ( var timeoutSendRequests = time.Second * 2 +func createResolversFinderStubThatShouldNotBeCalled(tb testing.TB) *mock.ResolversFinderStub { + return &mock.ResolversFinderStub{ + IntraShardResolverCalled: func(baseTopic string) (resolver dataRetriever.Resolver, err error) { + assert.Fail(tb, "IntraShardResolverCalled should not have been called") + return nil, nil + }, + MetaChainResolverCalled: func(baseTopic string) (resolver dataRetriever.Resolver, err error) { + assert.Fail(tb, "MetaChainResolverCalled should not have been called") + return nil, nil + }, + CrossShardResolverCalled: func(baseTopic string, crossShard uint32) (resolver dataRetriever.Resolver, err error) { + assert.Fail(tb, "CrossShardResolverCalled should not have been called") + return nil, nil + }, + } +} + +//------- NewMetaResolver + func TestNewMetaResolverRequestHandlerNilFinder(t *testing.T) { t.Parallel() rrh, err := NewMetaResolverRequestHandler( nil, &mock.RequestedItemsHandlerStub{}, - "shard topic", - "meta topic", - "tx topic", - "scr topic", - "miniblock topic", 100, ) @@ -36,11 +50,6 @@ func TestNewMetaResolverRequestHandlerNilRequestedItemsHandler(t *testing.T) { rrh, err := NewMetaResolverRequestHandler( &mock.ResolversFinderStub{}, nil, - "shard topic", - "meta topic", - "tx topic", - "scr topic", - "miniblock topic", 100, ) @@ -48,107 +57,12 @@ func TestNewMetaResolverRequestHandlerNilRequestedItemsHandler(t *testing.T) { assert.Equal(t, 
dataRetriever.ErrNilRequestedItemsHandler, err) } -func TestNewMetaResolverRequestShardHandlerEmptyTopic(t *testing.T) { - t.Parallel() - - rrh, err := NewMetaResolverRequestHandler( - &mock.ResolversFinderStub{}, - &mock.RequestedItemsHandlerStub{}, - "", - "meta topic", - "tx topic", - "scr topic", - "miniblock topic", - 100, - ) - - assert.Nil(t, rrh) - assert.Equal(t, dataRetriever.ErrEmptyShardHeaderRequestTopic, err) -} - -func TestNewMetaResolverRequestMetaHandlerEmptyTopic(t *testing.T) { - t.Parallel() - - rrh, err := NewMetaResolverRequestHandler( - &mock.ResolversFinderStub{}, - &mock.RequestedItemsHandlerStub{}, - "shard topic", - "", - "tx topic", - "scr topic", - "miniblock topic", - 100, - ) - - assert.Nil(t, rrh) - assert.Equal(t, dataRetriever.ErrEmptyMetaHeaderRequestTopic, err) -} - -func TestNewMetaResolverRequestTxHandlerEmptyTopic(t *testing.T) { - t.Parallel() - - rrh, err := NewMetaResolverRequestHandler( - &mock.ResolversFinderStub{}, - &mock.RequestedItemsHandlerStub{}, - "shard topic", - "meta topic", - "", - "scr topic", - "miniblock topic", - 100, - ) - - assert.Nil(t, rrh) - assert.Equal(t, dataRetriever.ErrEmptyTxRequestTopic, err) -} - -func TestNewMetaResolverRequestScrHandlerEmptyTopic(t *testing.T) { - t.Parallel() - - rrh, err := NewMetaResolverRequestHandler( - &mock.ResolversFinderStub{}, - &mock.RequestedItemsHandlerStub{}, - "shard topic", - "meta topic", - "tx topic", - "", - "miniblock topic", - 100, - ) - - assert.Nil(t, rrh) - assert.Equal(t, dataRetriever.ErrEmptyScrRequestTopic, err) -} - -func TestNewMetaResolverRequestMiniBlockHandlerEmptyTopic(t *testing.T) { - t.Parallel() - - rrh, err := NewMetaResolverRequestHandler( - &mock.ResolversFinderStub{}, - &mock.RequestedItemsHandlerStub{}, - "shard topic", - "meta topic", - "tx topic", - "scr topic", - "", - 100, - ) - - assert.Nil(t, rrh) - assert.Equal(t, dataRetriever.ErrEmptyMiniBlockRequestTopic, err) -} - func TestNewMetaResolverRequestHandlerMaxTxRequestTooSmall(t *testing.T) { t.Parallel() rrh, err := NewMetaResolverRequestHandler( &mock.ResolversFinderStub{}, &mock.RequestedItemsHandlerStub{}, - "shard topic", - "meta topic", - "tx topic", - "scr topic", - "mb topic", 0, ) @@ -162,30 +76,22 @@ func TestNewMetaResolverRequestHandler(t *testing.T) { rrh, err := NewMetaResolverRequestHandler( &mock.ResolversFinderStub{}, &mock.RequestedItemsHandlerStub{}, - "shard topic", - "meta topic", - "tx topic", - "scr topic", - "miniblock topic", 100, ) assert.Nil(t, err) assert.NotNil(t, rrh) } +//------- NewShardResolver + func TestNewShardResolverRequestHandlerNilFinder(t *testing.T) { t.Parallel() rrh, err := NewShardResolverRequestHandler( nil, &mock.RequestedItemsHandlerStub{}, - "topic", - "topic", - "topic", - "topic", - "topic", - "topic", 1, + 0, ) assert.Nil(t, rrh) @@ -198,124 +104,21 @@ func TestNewShardResolverRequestHandlerNilRequestedItemsHandler(t *testing.T) { rrh, err := NewShardResolverRequestHandler( &mock.ResolversFinderStub{}, nil, - "topic", - "topic", - "topic", - "topic", - "topic", - "topic", 1, + 0, ) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrNilRequestedItemsHandler, err) } -func TestNewShardResolverRequestHandlerTxTopicEmpty(t *testing.T) { - t.Parallel() - - rrh, err := NewShardResolverRequestHandler( - &mock.ResolversFinderStub{}, - &mock.RequestedItemsHandlerStub{}, - "", - "topic", - "topic", - "topic", - "topic", - "topic", - 1, - ) - - assert.Nil(t, rrh) - assert.Equal(t, dataRetriever.ErrEmptyTxRequestTopic, err) -} - -func 
TestNewShardResolverRequestHandlerScrTopicEmpty(t *testing.T) { - t.Parallel() - - rrh, err := NewShardResolverRequestHandler( - &mock.ResolversFinderStub{}, - &mock.RequestedItemsHandlerStub{}, - "topic", - "", - "topic", - "topic", - "topic", - "topic", - 1, - ) - - assert.Nil(t, rrh) - assert.Equal(t, dataRetriever.ErrEmptyScrRequestTopic, err) -} - -func TestNewShardResolverRequestHandlerMBTopicEmpty(t *testing.T) { - t.Parallel() - - rrh, err := NewShardResolverRequestHandler( - &mock.ResolversFinderStub{}, - &mock.RequestedItemsHandlerStub{}, - "topic", - "topic", - "topic", - "", - "topic", - "topic", - 1) - - assert.Nil(t, rrh) - assert.Equal(t, dataRetriever.ErrEmptyMiniBlockRequestTopic, err) -} - -func TestNewShardResolverRequestHandlerShardHdrTopicEmpty(t *testing.T) { - t.Parallel() - - rrh, err := NewShardResolverRequestHandler( - &mock.ResolversFinderStub{}, - &mock.RequestedItemsHandlerStub{}, - "topic", - "topic", - "topic", - "topic", - "", - "topic", - 1) - - assert.Nil(t, rrh) - assert.Equal(t, dataRetriever.ErrEmptyShardHeaderRequestTopic, err) -} - -func TestNewShardResolverRequestHandlerMetaHdrTopicEmpty(t *testing.T) { - t.Parallel() - - rrh, err := NewShardResolverRequestHandler( - &mock.ResolversFinderStub{}, - &mock.RequestedItemsHandlerStub{}, - "topic", - "topic", - "topic", - "topic", - "topic", - "", - 1, - ) - - assert.Nil(t, rrh) - assert.Equal(t, dataRetriever.ErrEmptyMetaHeaderRequestTopic, err) -} - func TestNewShardResolverRequestHandlerMaxTxRequestTooSmall(t *testing.T) { t.Parallel() rrh, err := NewShardResolverRequestHandler( &mock.ResolversFinderStub{}, &mock.RequestedItemsHandlerStub{}, - "topic", - "topic", - "topic", - "topic", - "topic", - "topic", + 0, 0, ) @@ -329,13 +132,8 @@ func TestNewShardResolverRequestHandler(t *testing.T) { rrh, err := NewShardResolverRequestHandler( &mock.ResolversFinderStub{}, &mock.RequestedItemsHandlerStub{}, - "topic", - "topic", - "topic", - "topic", - "topic", - "topic", 1, + 0, ) assert.Nil(t, err) @@ -362,13 +160,8 @@ func TestResolverRequestHandler_RequestTransactionErrorWhenGettingCrossShardReso }, }, &mock.RequestedItemsHandlerStub{}, - "txTopic", - "topic", - "topic", - "topic", - "topic", - "topic", 1, + 0, ) rrh.RequestTransaction(0, make([][]byte, 0)) @@ -393,13 +186,8 @@ func TestResolverRequestHandler_RequestTransactionWrongResolverShouldNotPanic(t }, }, &mock.RequestedItemsHandlerStub{}, - "txTopic", - "topic", - "topic", - "topic", - "topic", - "topic", 1, + 0, ) rrh.RequestTransaction(0, make([][]byte, 0)) @@ -423,13 +211,8 @@ func TestResolverRequestHandler_RequestTransactionShouldRequestTransactions(t *t }, }, &mock.RequestedItemsHandlerStub{}, - "txTopic", - "topic", - "topic", - "topic", - "topic", - "topic", 1, + 0, ) rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) @@ -469,13 +252,8 @@ func TestResolverRequestHandler_RequestTransactionErrorsOnRequestShouldNotPanic( }, }, &mock.RequestedItemsHandlerStub{}, - "txTopic", - "topic", - "topic", - "topic", - "topic", - "topic", 1, + 0, ) rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) @@ -509,13 +287,8 @@ func TestResolverRequestHandler_RequestMiniBlockErrorWhenGettingCrossShardResolv }, }, &mock.RequestedItemsHandlerStub{}, - "txTopic", - "topic", - "topic", - "topic", - "topic", - "topic", 1, + 0, ) rrh.RequestMiniBlock(0, make([]byte, 0)) @@ -545,13 +318,8 @@ func TestResolverRequestHandler_RequestMiniBlockErrorsOnRequestShouldNotPanic(t }, }, &mock.RequestedItemsHandlerStub{}, - "txTopic", - "topic", - "topic", - "topic", - "topic", 
- "topic", 1, + 0, ) rrh.RequestMiniBlock(0, []byte("mbHash")) @@ -575,13 +343,8 @@ func TestResolverRequestHandler_RequestMiniBlockShouldCallRequestOnResolver(t *t }, }, &mock.RequestedItemsHandlerStub{}, - "txTopic", - "topic", - "topic", - "topic", - "topic", - "topic", 1, + 0, ) rrh.RequestMiniBlock(0, []byte("mbHash")) @@ -589,13 +352,43 @@ func TestResolverRequestHandler_RequestMiniBlockShouldCallRequestOnResolver(t *t assert.True(t, wasCalled) } -//------- RequestHeader +//------- RequestShardHeader + +func TestResolverRequestHandler_RequestShardHeaderHashAlreadyRequestedShouldNotRequest(t *testing.T) { + t.Parallel() + + rrh, _ := NewShardResolverRequestHandler( + createResolversFinderStubThatShouldNotBeCalled(t), + &mock.RequestedItemsHandlerStub{ + HasCalled: func(key string) bool { + return true + }, + }, + 1, + 0, + ) + + rrh.RequestShardHeader(0, make([]byte, 0)) +} -func TestResolverRequestHandler_RequestHeaderShouldCallRequestOnResolver(t *testing.T) { +func TestResolverRequestHandler_RequestShardHeaderHashBadRequest(t *testing.T) { + t.Parallel() + + rrh, _ := NewShardResolverRequestHandler( + createResolversFinderStubThatShouldNotBeCalled(t), + &mock.RequestedItemsHandlerStub{}, + 1, + 0, + ) + + rrh.RequestShardHeader(1, make([]byte, 0)) +} + +func TestResolverRequestHandler_RequestShardHeaderShouldCallRequestOnResolver(t *testing.T) { t.Parallel() wasCalled := false - mbResolver := &mock.ResolverStub{ + mbResolver := &mock.HeaderResolverStub{ RequestDataFromHashCalled: func(hash []byte) error { wasCalled = true return nil @@ -609,23 +402,121 @@ func TestResolverRequestHandler_RequestHeaderShouldCallRequestOnResolver(t *test }, }, &mock.RequestedItemsHandlerStub{}, - "txTopic", - "topic", - "topic", - "topic", - "topic", - "topic", 1, + 0, + ) + + rrh.RequestShardHeader(0, []byte("hdrHash")) + + assert.True(t, wasCalled) +} + +//------- RequestMetaHeader + +func TestResolverRequestHandler_RequestMetadHeaderHashAlreadyRequestedShouldNotRequest(t *testing.T) { + t.Parallel() + + rrh, _ := NewShardResolverRequestHandler( + createResolversFinderStubThatShouldNotBeCalled(t), + &mock.RequestedItemsHandlerStub{ + HasCalled: func(key string) bool { + return true + }, + }, + 1, + 0, + ) + + rrh.RequestMetaHeader(make([]byte, 0)) +} + +func TestResolverRequestHandler_RequestMetadHeaderHashNotHeaderResolverShouldNotRequest(t *testing.T) { + t.Parallel() + + wasCalled := false + mbResolver := &mock.ResolverStub{ + RequestDataFromHashCalled: func(hash []byte) error { + wasCalled = true + return nil + }, + } + + rrh, _ := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{ + MetaChainResolverCalled: func(baseTopic string) (resolver dataRetriever.Resolver, e error) { + return mbResolver, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + 1, + 0, + ) + + rrh.RequestMetaHeader([]byte("hdrHash")) + + assert.False(t, wasCalled) +} + +func TestResolverRequestHandler_RequestMetaHeaderShouldCallRequestOnResolver(t *testing.T) { + t.Parallel() + + wasCalled := false + mbResolver := &mock.HeaderResolverStub{ + RequestDataFromHashCalled: func(hash []byte) error { + wasCalled = true + return nil + }, + } + + rrh, _ := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{ + MetaChainResolverCalled: func(baseTopic string) (resolver dataRetriever.Resolver, e error) { + return mbResolver, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + 1, + 0, ) - rrh.RequestHeader(0, []byte("hdrHash")) + rrh.RequestMetaHeader([]byte("hdrHash")) assert.True(t, wasCalled) } -//------- 
RequestHeaderByNonce +//------- RequestShardHeaderByNonce + +func TestResolverRequestHandler_RequestShardHeaderByNonceAlreadyRequestedShouldNotRequest(t *testing.T) { + t.Parallel() + + rrh, _ := NewShardResolverRequestHandler( + createResolversFinderStubThatShouldNotBeCalled(t), + &mock.RequestedItemsHandlerStub{ + HasCalled: func(key string) bool { + return true + }, + }, + 1, + 0, + ) + + rrh.RequestShardHeaderByNonce(0, 0) +} + +func TestResolverRequestHandler_RequestShardHeaderByNonceBadRequest(t *testing.T) { + t.Parallel() + + rrh, _ := NewShardResolverRequestHandler( + createResolversFinderStubThatShouldNotBeCalled(t), + &mock.RequestedItemsHandlerStub{}, + 1, + 0, + ) + + rrh.RequestShardHeaderByNonce(1, 0) +} -func TestResolverRequestHandler_RequestHeaderByNonceShardFinderReturnsErrorShouldNotPanic(t *testing.T) { +func TestResolverRequestHandler_RequestShardHeaderByNonceFinderReturnsErrorShouldNotPanic(t *testing.T) { t.Parallel() defer func() { @@ -639,24 +530,19 @@ func TestResolverRequestHandler_RequestHeaderByNonceShardFinderReturnsErrorShoul rrh, _ := NewShardResolverRequestHandler( &mock.ResolversFinderStub{ - MetaChainResolverCalled: func(baseTopic string) (resolver dataRetriever.Resolver, e error) { + CrossShardResolverCalled: func(baseTopic string, shardID uint32) (resolver dataRetriever.Resolver, e error) { return nil, errExpected }, }, &mock.RequestedItemsHandlerStub{}, - "txTopic", - "topic", - "topic", - "topic", - "topic", - "topic", 1, + 0, ) - rrh.RequestHeaderByNonce(0, 0) + rrh.RequestShardHeaderByNonce(0, 0) } -func TestResolverRequestHandler_RequestHeaderByNonceShardFinderReturnsAWrongResolverShouldNotPanic(t *testing.T) { +func TestResolverRequestHandler_RequestShardHeaderByNonceFinderReturnsAWrongResolverShouldNotPanic(t *testing.T) { t.Parallel() defer func() { @@ -675,24 +561,19 @@ func TestResolverRequestHandler_RequestHeaderByNonceShardFinderReturnsAWrongReso rrh, _ := NewShardResolverRequestHandler( &mock.ResolversFinderStub{ - MetaChainResolverCalled: func(baseTopic string) (resolver dataRetriever.Resolver, e error) { + CrossShardResolverCalled: func(baseTopic string, shardID uint32) (resolver dataRetriever.Resolver, e error) { return hdrResolver, nil }, }, &mock.RequestedItemsHandlerStub{}, - "txTopic", - "topic", - "topic", - "topic", - "topic", - "topic", 1, + 0, ) - rrh.RequestHeaderByNonce(0, 0) + rrh.RequestShardHeaderByNonce(0, 0) } -func TestResolverRequestHandler_RequestHeaderByNonceShardResolverFailsShouldNotPanic(t *testing.T) { +func TestResolverRequestHandler_RequestShardHeaderByNonceResolverFailsShouldNotPanic(t *testing.T) { t.Parallel() defer func() { @@ -711,24 +592,19 @@ func TestResolverRequestHandler_RequestHeaderByNonceShardResolverFailsShouldNotP rrh, _ := NewShardResolverRequestHandler( &mock.ResolversFinderStub{ - MetaChainResolverCalled: func(baseTopic string) (resolver dataRetriever.Resolver, e error) { + CrossShardResolverCalled: func(baseTopic string, shardID uint32) (resolver dataRetriever.Resolver, e error) { return hdrResolver, nil }, }, &mock.RequestedItemsHandlerStub{}, - "txTopic", - "topic", - "topic", - "topic", - "topic", - "topic", 1, + 0, ) - rrh.RequestHeaderByNonce(0, 0) + rrh.RequestShardHeaderByNonce(0, 0) } -func TestResolverRequestHandler_RequestHeaderByNonceShardShouldRequest(t *testing.T) { +func TestResolverRequestHandler_RequestShardHeaderByNonceShouldRequest(t *testing.T) { t.Parallel() wasCalled := false @@ -741,26 +617,40 @@ func TestResolverRequestHandler_RequestHeaderByNonceShardShouldRequest(t 
*testin rrh, _ := NewShardResolverRequestHandler( &mock.ResolversFinderStub{ - MetaChainResolverCalled: func(baseTopic string) (resolver dataRetriever.Resolver, e error) { + CrossShardResolverCalled: func(baseTopic string, shardID uint32) (resolver dataRetriever.Resolver, e error) { return hdrResolver, nil }, }, &mock.RequestedItemsHandlerStub{}, - "txTopic", - "topic", - "topic", - "topic", - "topic", - "topic", 1, + 0, ) - rrh.RequestHeaderByNonce(0, 0) + rrh.RequestShardHeaderByNonce(0, 0) assert.True(t, wasCalled) } -func TestResolverRequestHandler_RequestHeaderByNonceMetaShouldRequest(t *testing.T) { +//------- RequestMetaHeaderByNonce + +func TestResolverRequestHandler_RequestMetaHeaderHashAlreadyRequestedShouldNotRequest(t *testing.T) { + t.Parallel() + + rrh, _ := NewShardResolverRequestHandler( + createResolversFinderStubThatShouldNotBeCalled(t), + &mock.RequestedItemsHandlerStub{ + HasCalled: func(key string) bool { + return true + }, + }, + 1, + 0, + ) + + rrh.RequestMetaHeaderByNonce(0) +} + +func TestResolverRequestHandler_RequestMetaHeaderByNonceShouldRequest(t *testing.T) { t.Parallel() wasCalled := false @@ -771,27 +661,23 @@ func TestResolverRequestHandler_RequestHeaderByNonceMetaShouldRequest(t *testing }, } - rrh, _ := NewMetaResolverRequestHandler( + rrh, _ := NewShardResolverRequestHandler( &mock.ResolversFinderStub{ - CrossShardResolverCalled: func(baseTopic string, destShardID uint32) (resolver dataRetriever.Resolver, e error) { + MetaChainResolverCalled: func(baseTopic string) (resolver dataRetriever.Resolver, e error) { return hdrResolver, nil }, }, &mock.RequestedItemsHandlerStub{}, - "topic", - "topic", - "topic", - "topic", - "topic", 100, + 0, ) - rrh.RequestHeaderByNonce(0, 0) + rrh.RequestMetaHeaderByNonce(0) assert.True(t, wasCalled) } -//------- RequestTransaction +//------- RequestSmartContractResult func TestResolverRequestHandler_RequestScrErrorWhenGettingCrossShardResolverShouldNotPanic(t *testing.T) { t.Parallel() @@ -811,13 +697,8 @@ func TestResolverRequestHandler_RequestScrErrorWhenGettingCrossShardResolverShou }, }, &mock.RequestedItemsHandlerStub{}, - "txTopic", - "scrtopic", - "topic", - "topic", - "topic", - "topic", 1, + 0, ) rrh.RequestUnsignedTransactions(0, make([][]byte, 0)) @@ -842,13 +723,8 @@ func TestResolverRequestHandler_RequestScrWrongResolverShouldNotPanic(t *testing }, }, &mock.RequestedItemsHandlerStub{}, - "txTopic", - "scrtopic", - "topic", - "topic", - "topic", - "topic", 1, + 0, ) rrh.RequestUnsignedTransactions(0, make([][]byte, 0)) @@ -872,13 +748,8 @@ func TestResolverRequestHandler_RequestScrShouldRequestScr(t *testing.T) { }, }, &mock.RequestedItemsHandlerStub{}, - "txTopic", - "scrtopic", - "topic", - "topic", - "topic", - "topic", 1, + 0, ) rrh.RequestUnsignedTransactions(0, [][]byte{[]byte("txHash")}) @@ -918,13 +789,8 @@ func TestResolverRequestHandler_RequestScrErrorsOnRequestShouldNotPanic(t *testi }, }, &mock.RequestedItemsHandlerStub{}, - "txTopic", - "scrtopic", - "topic", - "topic", - "topic", - "topic", 1, + 0, ) rrh.RequestUnsignedTransactions(0, [][]byte{[]byte("txHash")}) @@ -937,3 +803,38 @@ func TestResolverRequestHandler_RequestScrErrorsOnRequestShouldNotPanic(t *testi time.Sleep(time.Second) } + +//------- RequestRewardTransaction + +func TestResolverRequestHandler_RequestRewardShouldRequestReward(t *testing.T) { + t.Parallel() + + chTxRequested := make(chan struct{}) + txResolver := &mock.HashSliceResolverStub{ + RequestDataFromHashArrayCalled: func(hashes [][]byte) error { + chTxRequested <- struct{}{} 
+ return nil + }, + } + + rrh, _ := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{ + CrossShardResolverCalled: func(baseTopic string, crossShard uint32) (resolver dataRetriever.Resolver, e error) { + return txResolver, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + 1, + 0, + ) + + rrh.RequestRewardTransactions(0, [][]byte{[]byte("txHash")}) + + select { + case <-chTxRequested: + case <-time.After(timeoutSendRequests): + assert.Fail(t, "timeout while waiting to call RequestDataFromHashArray") + } + + time.Sleep(time.Second) +} diff --git a/dataRetriever/resolvers/headerResolver.go b/dataRetriever/resolvers/headerResolver.go index 64944696ef3..41803b5f4ef 100644 --- a/dataRetriever/resolvers/headerResolver.go +++ b/dataRetriever/resolvers/headerResolver.go @@ -1,6 +1,8 @@ package resolvers import ( + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/logger" @@ -14,60 +16,66 @@ var log = logger.GetOrCreate("dataretriever/resolvers") // HeaderResolver is a wrapper over Resolver that is specialized in resolving headers requests type HeaderResolver struct { dataRetriever.TopicResolverSender - headers storage.Cacher - hdrNonces dataRetriever.Uint64SyncMapCacher + headers dataRetriever.HeadersPool hdrStorage storage.Storer hdrNoncesStorage storage.Storer marshalizer marshal.Marshalizer nonceConverter typeConverters.Uint64ByteSliceConverter + epochHandler dataRetriever.EpochHandler } // NewHeaderResolver creates a new header resolver func NewHeaderResolver( senderResolver dataRetriever.TopicResolverSender, - headers storage.Cacher, - headersNonces dataRetriever.Uint64SyncMapCacher, + headers dataRetriever.HeadersPool, hdrStorage storage.Storer, headersNoncesStorage storage.Storer, marshalizer marshal.Marshalizer, nonceConverter typeConverters.Uint64ByteSliceConverter, ) (*HeaderResolver, error) { - if senderResolver == nil || senderResolver.IsInterfaceNil() { + if check.IfNil(senderResolver) { return nil, dataRetriever.ErrNilResolverSender } - if headers == nil || headers.IsInterfaceNil() { + if check.IfNil(headers) { return nil, dataRetriever.ErrNilHeadersDataPool } - if headersNonces == nil || headersNonces.IsInterfaceNil() { - return nil, dataRetriever.ErrNilHeadersNoncesDataPool - } - if hdrStorage == nil || hdrStorage.IsInterfaceNil() { + if check.IfNil(hdrStorage) { return nil, dataRetriever.ErrNilHeadersStorage } - if headersNoncesStorage == nil || headersNoncesStorage.IsInterfaceNil() { + if check.IfNil(headersNoncesStorage) { return nil, dataRetriever.ErrNilHeadersNoncesStorage } - if marshalizer == nil || marshalizer.IsInterfaceNil() { + if check.IfNil(marshalizer) { return nil, dataRetriever.ErrNilMarshalizer } - if nonceConverter == nil || nonceConverter.IsInterfaceNil() { + if check.IfNil(nonceConverter) { return nil, dataRetriever.ErrNilUint64ByteSliceConverter } hdrResolver := &HeaderResolver{ TopicResolverSender: senderResolver, headers: headers, - hdrNonces: headersNonces, hdrStorage: hdrStorage, hdrNoncesStorage: headersNoncesStorage, marshalizer: marshalizer, nonceConverter: nonceConverter, + epochHandler: &nilEpochHandler{}, } return hdrResolver, nil } +// SetEpochHandler sets the epoch handler for this component +func (hdrRes *HeaderResolver) SetEpochHandler(epochHandler dataRetriever.EpochHandler) error { + if check.IfNil(epochHandler) { + return 
dataRetriever.ErrNilEpochHandler + } + + hdrRes.epochHandler = epochHandler + return nil +} + // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) func (hdrRes *HeaderResolver) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { @@ -82,6 +90,8 @@ func (hdrRes *HeaderResolver) ProcessReceivedMessage(message p2p.MessageP2P, _ f buff, err = hdrRes.resolveHeaderFromHash(rd.Value) case dataRetriever.NonceType: buff, err = hdrRes.resolveHeaderFromNonce(rd.Value) + case dataRetriever.EpochType: + buff, err = hdrRes.resolveHeaderFromEpoch(rd.Value) default: return dataRetriever.ErrResolveTypeUnknown } @@ -99,35 +109,29 @@ func (hdrRes *HeaderResolver) ProcessReceivedMessage(message p2p.MessageP2P, _ f func (hdrRes *HeaderResolver) resolveHeaderFromNonce(key []byte) ([]byte, error) { // key is now an encoded nonce (uint64) - // Search the nonce-key pair in cache-storage hash, err := hdrRes.hdrNoncesStorage.Get(key) if err != nil { log.Trace("hdrNoncesStorage.Get", "error", err.Error()) - } - // Search the nonce-key pair in data pool - if hash == nil { - nonceBytes, err := hdrRes.nonceConverter.ToUint64(key) + nonce, err := hdrRes.nonceConverter.ToUint64(key) if err != nil { return nil, dataRetriever.ErrInvalidNonceByteSlice } - value, ok := hdrRes.hdrNonces.Get(nonceBytes) - if ok { - value.Range(func(shardId uint32, existingHash []byte) bool { - if shardId == hdrRes.TargetShardID() { - hash = existingHash - return false - } - - return true - }) + headers, _, err := hdrRes.headers.GetHeadersByNonceAndShardId(nonce, hdrRes.TargetShardID()) + if err != nil { + return nil, err } - if len(hash) == 0 { - return nil, nil + // TODO maybe we can return a slice of headers + hdr := headers[len(headers)-1] + buff, err := hdrRes.marshalizer.Marshal(hdr) + if err != nil { + return nil, err } + + return buff, nil } return hdrRes.resolveHeaderFromHash(hash) @@ -135,8 +139,8 @@ func (hdrRes *HeaderResolver) resolveHeaderFromNonce(key []byte) ([]byte, error) // resolveHeaderFromHash resolves a header using its key (header hash) func (hdrRes *HeaderResolver) resolveHeaderFromHash(key []byte) ([]byte, error) { - value, ok := hdrRes.headers.Peek(key) - if !ok { + value, err := hdrRes.headers.GetHeaderByHash(key) + if err != nil { return hdrRes.hdrStorage.Get(key) } @@ -148,6 +152,22 @@ func (hdrRes *HeaderResolver) resolveHeaderFromHash(key []byte) ([]byte, error) return buff, nil } +// resolveHeaderFromEpoch resolves a header using its key based on epoch +func (hdrRes *HeaderResolver) resolveHeaderFromEpoch(key []byte) ([]byte, error) { + actualKey := key + + isUnknownEpoch, err := core.IsUnknownEpochIdentifier(key) + if err != nil { + return nil, err + } + + if isUnknownEpoch { + actualKey = []byte(core.EpochStartIdentifier(hdrRes.epochHandler.Epoch())) + } + + return hdrRes.hdrStorage.Get(actualKey) +} + // parseReceivedMessage will transform the received p2p.Message in a RequestData object. 
func (hdrRes *HeaderResolver) parseReceivedMessage(message p2p.MessageP2P) (*dataRetriever.RequestData, error) { rd := &dataRetriever.RequestData{} @@ -178,6 +198,14 @@ func (hdrRes *HeaderResolver) RequestDataFromNonce(nonce uint64) error { }) } +// RequestDataFromEpoch requests a header from other peers having input the epoch +func (hdrRes *HeaderResolver) RequestDataFromEpoch(identifier []byte) error { + return hdrRes.SendOnRequestTopic(&dataRetriever.RequestData{ + Type: dataRetriever.EpochType, + Value: identifier, + }) +} + // IsInterfaceNil returns true if there is no value under the interface func (hdrRes *HeaderResolver) IsInterfaceNil() bool { if hdrRes == nil { diff --git a/dataRetriever/resolvers/headerResolver_test.go b/dataRetriever/resolvers/headerResolver_test.go index 967f3f78516..1fc61035732 100644 --- a/dataRetriever/resolvers/headerResolver_test.go +++ b/dataRetriever/resolvers/headerResolver_test.go @@ -3,10 +3,11 @@ package resolvers_test import ( "bytes" "errors" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" "testing" "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" "github.com/ElrondNetwork/elrond-go/dataRetriever/mock" "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers" "github.com/ElrondNetwork/elrond-go/p2p" @@ -20,8 +21,7 @@ func TestNewHeaderResolver_NilSenderResolverShouldErr(t *testing.T) { hdrRes, err := resolvers.NewHeaderResolver( nil, - &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, + &mock.HeadersCacherStub{}, &mock.StorerStub{}, &mock.StorerStub{}, &mock.MarshalizerMock{}, @@ -38,7 +38,6 @@ func TestNewHeaderResolver_NilHeadersPoolShouldErr(t *testing.T) { hdrRes, err := resolvers.NewHeaderResolver( &mock.TopicResolverSenderStub{}, nil, - &mock.Uint64SyncMapCacherStub{}, &mock.StorerStub{}, &mock.StorerStub{}, &mock.MarshalizerMock{}, @@ -49,30 +48,12 @@ func TestNewHeaderResolver_NilHeadersPoolShouldErr(t *testing.T) { assert.Nil(t, hdrRes) } -func TestNewHeaderResolver_NilHeadersNoncesPoolShouldErr(t *testing.T) { - t.Parallel() - - hdrRes, err := resolvers.NewHeaderResolver( - &mock.TopicResolverSenderStub{}, - &mock.CacherStub{}, - nil, - &mock.StorerStub{}, - &mock.StorerStub{}, - &mock.MarshalizerMock{}, - mock.NewNonceHashConverterMock(), - ) - - assert.Equal(t, dataRetriever.ErrNilHeadersNoncesDataPool, err) - assert.Nil(t, hdrRes) -} - func TestNewHeaderResolver_NilHeadersStorageShouldErr(t *testing.T) { t.Parallel() hdrRes, err := resolvers.NewHeaderResolver( &mock.TopicResolverSenderStub{}, - &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, + &mock.HeadersCacherStub{}, nil, &mock.StorerStub{}, &mock.MarshalizerMock{}, @@ -88,8 +69,7 @@ func TestNewHeaderResolver_NilHeadersNoncesStorageShouldErr(t *testing.T) { hdrRes, err := resolvers.NewHeaderResolver( &mock.TopicResolverSenderStub{}, - &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, + &mock.HeadersCacherStub{}, &mock.StorerStub{}, nil, &mock.MarshalizerMock{}, @@ -105,8 +85,7 @@ func TestNewHeaderResolver_NilMarshalizerShouldErr(t *testing.T) { hdrRes, err := resolvers.NewHeaderResolver( &mock.TopicResolverSenderStub{}, - &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, + &mock.HeadersCacherStub{}, &mock.StorerStub{}, &mock.StorerStub{}, nil, @@ -122,8 +101,7 @@ func TestNewHeaderResolver_NilNonceConverterShouldErr(t *testing.T) { hdrRes, err := resolvers.NewHeaderResolver( &mock.TopicResolverSenderStub{}, - &mock.CacherStub{}, - 
&mock.Uint64SyncMapCacherStub{}, + &mock.HeadersCacherStub{}, &mock.StorerStub{}, &mock.StorerStub{}, &mock.MarshalizerMock{}, @@ -139,8 +117,7 @@ func TestNewHeaderResolver_OkValsShouldWork(t *testing.T) { hdrRes, err := resolvers.NewHeaderResolver( &mock.TopicResolverSenderStub{}, - &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, + &mock.HeadersCacherStub{}, &mock.StorerStub{}, &mock.StorerStub{}, &mock.MarshalizerMock{}, @@ -158,8 +135,7 @@ func TestHeaderResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { hdrRes, _ := resolvers.NewHeaderResolver( &mock.TopicResolverSenderStub{}, - &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, + &mock.HeadersCacherStub{}, &mock.StorerStub{}, &mock.StorerStub{}, &mock.MarshalizerMock{}, @@ -175,8 +151,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestUnknownTypeShouldErr(t *tes hdrRes, _ := resolvers.NewHeaderResolver( &mock.TopicResolverSenderStub{}, - &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, + &mock.HeadersCacherStub{}, &mock.StorerStub{}, &mock.StorerStub{}, &mock.MarshalizerMock{}, @@ -196,14 +171,14 @@ func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSend searchWasCalled := false sendWasCalled := false - headers := &mock.CacherStub{} + headers := &mock.HeadersCacherStub{} - headers.PeekCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal(requestedData, key) { + headers.GetHeaderByHashCalled = func(hash []byte) (handler data.HeaderHandler, e error) { + if bytes.Equal(requestedData, hash) { searchWasCalled = true - return make([]byte, 0), true + return &block.Header{}, nil } - return nil, false + return nil, errors.New("0") } marshalizer := &mock.MarshalizerMock{} @@ -216,7 +191,6 @@ func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSend }, }, headers, - &mock.Uint64SyncMapCacherStub{}, &mock.StorerStub{}, &mock.StorerStub{}, marshalizer, @@ -233,17 +207,16 @@ func TestHeaderResolver_ProcessReceivedMessageRequestHashTypeFoundInHdrPoolMarsh t.Parallel() requestedData := []byte("aaaa") - resolvedData := []byte("bbbb") errExpected := errors.New("MarshalizerMock generic error") - headers := &mock.CacherStub{} + headers := &mock.HeadersCacherStub{} - headers.PeekCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal(requestedData, key) { - return resolvedData, true + headers.GetHeaderByHashCalled = func(hash []byte) (handler data.HeaderHandler, e error) { + if bytes.Equal(requestedData, hash) { + return &block.Header{}, nil } - return nil, false + return nil, errors.New("err") } marshalizerMock := &mock.MarshalizerMock{} @@ -263,7 +236,6 @@ func TestHeaderResolver_ProcessReceivedMessageRequestHashTypeFoundInHdrPoolMarsh }, }, headers, - &mock.Uint64SyncMapCacherStub{}, &mock.StorerStub{}, &mock.StorerStub{}, marshalizerStub, @@ -279,10 +251,10 @@ func TestHeaderResolver_ProcessReceivedMessageRequestRetFromStorageShouldRetValA requestedData := []byte("aaaa") - headers := &mock.CacherStub{} + headers := &mock.HeadersCacherStub{} - headers.PeekCalled = func(key []byte) (value interface{}, ok bool) { - return nil, false + headers.GetHeaderByHashCalled = func(hash []byte) (handler data.HeaderHandler, e error) { + return nil, errors.New("err") } wasGotFromStorage := false @@ -308,7 +280,6 @@ func TestHeaderResolver_ProcessReceivedMessageRequestRetFromStorageShouldRetValA }, }, headers, - &mock.Uint64SyncMapCacherStub{}, store, &mock.StorerStub{}, marshalizer, @@ -326,8 +297,7 @@ func 
TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeInvalidSliceShould hdrRes, _ := resolvers.NewHeaderResolver( &mock.TopicResolverSenderStub{}, - &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, + &mock.HeadersCacherStub{}, &mock.StorerStub{}, &mock.StorerStub{ GetCalled: func(key []byte) ([]byte, error) { @@ -346,14 +316,9 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeNotFoundInHdrNonce t.Parallel() requestedNonce := uint64(67) - - headersNonces := &mock.Uint64SyncMapCacherStub{} - headersNonces.GetCalled = func(nonce uint64) (dataRetriever.ShardIdHashMap, bool) { - return nil, false - } - nonceConverter := mock.NewNonceHashConverterMock() + expectedErr := errors.New("err") wasSent := false hdrRes, _ := resolvers.NewHeaderResolver( @@ -362,9 +327,15 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeNotFoundInHdrNonce wasSent = true return nil }, + TargetShardIDCalled: func() uint32 { + return 1 + }, + }, + &mock.HeadersCacherStub{ + GetHeaderByNonceAndShardIdCalled: func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + return nil, nil, expectedErr + }, }, - &mock.CacherStub{}, - headersNonces, &mock.StorerStub{}, &mock.StorerStub{ GetCalled: func(key []byte) (i []byte, e error) { @@ -379,7 +350,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeNotFoundInHdrNonce createRequestMsg(dataRetriever.NonceType, nonceConverter.ToByteSlice(requestedNonce)), nil, ) - assert.Nil(t, err) + assert.Equal(t, expectedErr, err) assert.False(t, wasSent) } @@ -390,28 +361,11 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo targetShardId := uint32(9) wasResolved := false wasSent := false - hash := []byte("aaaa") - - headers := &mock.CacherStub{} - headers.PeekCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal(key, hash) { - wasResolved = true - return make([]byte, 0), true - } - - return nil, false - } - - headersNonces := &mock.Uint64SyncMapCacherStub{} - headersNonces.GetCalled = func(u uint64) (dataRetriever.ShardIdHashMap, bool) { - if u == requestedNonce { - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(targetShardId, hash) - - return syncMap, true - } - return nil, false + headers := &mock.HeadersCacherStub{} + headers.GetHeaderByNonceAndShardIdCalled = func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + wasResolved = true + return []data.HeaderHandler{&block.Header{}, &block.Header{}}, [][]byte{[]byte("1"), []byte("2")}, nil } nonceConverter := mock.NewNonceHashConverterMock() @@ -428,7 +382,6 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo }, }, headers, - headersNonces, &mock.StorerStub{}, &mock.StorerStub{ GetCalled: func(key []byte) ([]byte, error) { @@ -458,21 +411,13 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo wasSend := false hash := []byte("aaaa") - headers := &mock.CacherStub{} - headers.PeekCalled = func(key []byte) (value interface{}, ok bool) { - return nil, false + headers := &mock.HeadersCacherStub{} + headers.GetHeaderByHashCalled = func(hash []byte) (handler data.HeaderHandler, e error) { + return nil, errors.New("err") } - - headersNonces := &mock.Uint64SyncMapCacherStub{} - headersNonces.GetCalled = func(u uint64) (dataRetriever.ShardIdHashMap, bool) { - if u == requestedNonce { - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(targetShardId, hash) - - return syncMap, true - } - - 
return nil, false + headers.GetHeaderByNonceAndShardIdCalled = func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + wasResolved = true + return []data.HeaderHandler{&block.Header{}, &block.Header{}}, [][]byte{[]byte("1"), []byte("2")}, nil } nonceConverter := mock.NewNonceHashConverterMock() @@ -499,7 +444,6 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo }, }, headers, - headersNonces, store, &mock.StorerStub{ GetCalled: func(key []byte) ([]byte, error) { @@ -526,23 +470,13 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo requestedNonce := uint64(67) targetShardId := uint32(9) errExpected := errors.New("expected error") - hash := []byte("aaaa") - headers := &mock.CacherStub{} - headers.PeekCalled = func(key []byte) (value interface{}, ok bool) { - return nil, false + headers := &mock.HeadersCacherStub{} + headers.GetHeaderByHashCalled = func(hash []byte) (handler data.HeaderHandler, e error) { + return nil, errors.New("err") } - - headersNonces := &mock.Uint64SyncMapCacherStub{} - headersNonces.GetCalled = func(u uint64) (dataRetriever.ShardIdHashMap, bool) { - if u == requestedNonce { - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(targetShardId, hash) - - return syncMap, true - } - - return nil, false + headers.GetHeaderByNonceAndShardIdCalled = func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + return nil, nil, errExpected } nonceConverter := mock.NewNonceHashConverterMock() @@ -567,7 +501,6 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo }, }, headers, - headersNonces, store, &mock.StorerStub{ GetCalled: func(key []byte) ([]byte, error) { @@ -607,8 +540,7 @@ func TestHeaderResolver_RequestDataFromNonceShouldWork(t *testing.T) { return nil }, }, - &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, + &mock.HeadersCacherStub{}, &mock.StorerStub{}, &mock.StorerStub{}, &mock.MarshalizerMock{}, @@ -635,8 +567,7 @@ func TestHeaderResolverBase_RequestDataFromHashShouldWork(t *testing.T) { return nil }, }, - &mock.CacherStub{}, - &mock.Uint64SyncMapCacherStub{}, + &mock.HeadersCacherStub{}, &mock.StorerStub{}, &mock.StorerStub{}, &mock.MarshalizerMock{}, diff --git a/dataRetriever/resolvers/nilEpochHandler.go b/dataRetriever/resolvers/nilEpochHandler.go new file mode 100644 index 00000000000..da8f548c0dc --- /dev/null +++ b/dataRetriever/resolvers/nilEpochHandler.go @@ -0,0 +1,14 @@ +package resolvers + +type nilEpochHandler struct { +} + +// Epoch returns the current epoch +func (n *nilEpochHandler) Epoch() uint32 { + return 0 +} + +// IsInterfaceNil returns if underlying struct is nil +func (n *nilEpochHandler) IsInterfaceNil() bool { + return n == nil +} diff --git a/dataRetriever/resolvers/trieNodeResolver.go b/dataRetriever/resolvers/trieNodeResolver.go new file mode 100644 index 00000000000..861ddaef0de --- /dev/null +++ b/dataRetriever/resolvers/trieNodeResolver.go @@ -0,0 +1,88 @@ +package resolvers + +import ( + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/p2p" +) + +// maxBuffToSendTrieNodes represents max buffer size to send in bytes +var maxBuffToSendTrieNodes = uint64(2 << 17) //128KB + +// TrieNodeResolver is a wrapper over Resolver that is specialized in resolving trie node requests +type TrieNodeResolver struct { + 
dataRetriever.TopicResolverSender + trieDataGetter dataRetriever.TrieDataGetter + marshalizer marshal.Marshalizer +} + +// NewTrieNodeResolver creates a new trie node resolver +func NewTrieNodeResolver( + senderResolver dataRetriever.TopicResolverSender, + trieDataGetter dataRetriever.TrieDataGetter, + marshalizer marshal.Marshalizer, +) (*TrieNodeResolver, error) { + if check.IfNil(senderResolver) { + return nil, dataRetriever.ErrNilResolverSender + } + if check.IfNil(trieDataGetter) { + return nil, dataRetriever.ErrNilTrieDataGetter + } + if check.IfNil(marshalizer) { + return nil, dataRetriever.ErrNilMarshalizer + } + + return &TrieNodeResolver{ + TopicResolverSender: senderResolver, + trieDataGetter: trieDataGetter, + marshalizer: marshalizer, + }, nil +} + +// ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received +// (for the topic this validator was registered to, usually a request topic) +func (tnRes *TrieNodeResolver) ProcessReceivedMessage(message p2p.MessageP2P, _ func(buffToSend []byte)) error { + rd := &dataRetriever.RequestData{} + err := rd.Unmarshal(tnRes.marshalizer, message) + if err != nil { + return err + } + + if rd.Value == nil { + return dataRetriever.ErrNilValue + } + + switch rd.Type { + case dataRetriever.HashType: + serializedNodes, err := tnRes.trieDataGetter.GetSerializedNodes(rd.Value, maxBuffToSendTrieNodes) + if err != nil { + return err + } + + buff, err := tnRes.marshalizer.Marshal(serializedNodes) + if err != nil { + return err + } + + return tnRes.Send(buff, message.Peer()) + default: + return dataRetriever.ErrRequestTypeNotImplemented + } +} + +// RequestDataFromHash requests trie nodes from other peers having input a trie node hash +func (tnRes *TrieNodeResolver) RequestDataFromHash(hash []byte) error { + return tnRes.SendOnRequestTopic(&dataRetriever.RequestData{ + Type: dataRetriever.HashType, + Value: hash, + }) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (tnRes *TrieNodeResolver) IsInterfaceNil() bool { + if tnRes == nil { + return true + } + return false +} diff --git a/dataRetriever/resolvers/trieNodeResolver_test.go b/dataRetriever/resolvers/trieNodeResolver_test.go new file mode 100644 index 00000000000..4d8453c2312 --- /dev/null +++ b/dataRetriever/resolvers/trieNodeResolver_test.go @@ -0,0 +1,211 @@ +package resolvers + +import ( + "bytes" + "errors" + "testing" + + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/dataRetriever/mock" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/stretchr/testify/assert" +) + +func TestNewTrieNodeResolver_NilResolverShouldErr(t *testing.T) { + t.Parallel() + + tnRes, err := NewTrieNodeResolver( + nil, + &mock.TrieStub{}, + &mock.MarshalizerMock{}, + ) + + assert.Equal(t, dataRetriever.ErrNilResolverSender, err) + assert.Nil(t, tnRes) +} + +func TestNewTrieNodeResolver_NilTrieShouldErr(t *testing.T) { + t.Parallel() + + tnRes, err := NewTrieNodeResolver( + &mock.TopicResolverSenderStub{}, + nil, + &mock.MarshalizerMock{}, + ) + + assert.Equal(t, dataRetriever.ErrNilTrieDataGetter, err) + assert.Nil(t, tnRes) +} + +func TestNewTrieNodeResolver_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + tnRes, err := NewTrieNodeResolver( + &mock.TopicResolverSenderStub{}, + &mock.TrieStub{}, + nil, + ) + + assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) + assert.Nil(t, tnRes) +} + +func TestNewTrieNodeResolver_OkValsShouldWork(t *testing.T) { + 
t.Parallel() + + tnRes, err := NewTrieNodeResolver( + &mock.TopicResolverSenderStub{}, + &mock.TrieStub{}, + &mock.MarshalizerMock{}, + ) + + assert.NotNil(t, tnRes) + assert.Nil(t, err) +} + +//------- ProcessReceivedMessage + +func TestTrieNodeResolver_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { + t.Parallel() + + tnRes, _ := NewTrieNodeResolver( + &mock.TopicResolverSenderStub{}, + &mock.TrieStub{}, + &mock.MarshalizerMock{}, + ) + + err := tnRes.ProcessReceivedMessage(nil, nil) + assert.Equal(t, dataRetriever.ErrNilMessage, err) +} + +func TestTrieNodeResolver_ProcessReceivedMessageWrongTypeShouldErr(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + + tnRes, _ := NewTrieNodeResolver( + &mock.TopicResolverSenderStub{}, + &mock.TrieStub{}, + marshalizer, + ) + + data, _ := marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.NonceType, Value: []byte("aaa")}) + msg := &mock.P2PMessageMock{DataField: data} + + err := tnRes.ProcessReceivedMessage(msg, nil) + assert.Equal(t, dataRetriever.ErrRequestTypeNotImplemented, err) +} + +func TestTrieNodeResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + + tnRes, _ := NewTrieNodeResolver( + &mock.TopicResolverSenderStub{}, + &mock.TrieStub{}, + marshalizer, + ) + + data, _ := marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: nil}) + msg := &mock.P2PMessageMock{DataField: data} + + err := tnRes.ProcessReceivedMessage(msg, nil) + assert.Equal(t, dataRetriever.ErrNilValue, err) +} + +func TestTrieNodeResolver_ProcessReceivedMessageShouldGetFromTrieAndSend(t *testing.T) { + t.Parallel() + + marshalizer := &mock.MarshalizerMock{} + getSerializedNodesWasCalled := false + sendWasCalled := false + returnedEncNodes := [][]byte{[]byte("node1"), []byte("node2")} + + tr := &mock.TrieStub{ + GetSerializedNodesCalled: func(hash []byte, maxSize uint64) ([][]byte, error) { + if bytes.Equal([]byte("node1"), hash) { + getSerializedNodesWasCalled = true + return returnedEncNodes, nil + } + + return nil, errors.New("wrong hash") + }, + } + + tnRes, _ := NewTrieNodeResolver( + &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer p2p.PeerID) error { + sendWasCalled = true + return nil + }, + }, + tr, + marshalizer, + ) + + data, _ := marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: []byte("node1")}) + msg := &mock.P2PMessageMock{DataField: data} + + err := tnRes.ProcessReceivedMessage(msg, nil) + + assert.Nil(t, err) + assert.True(t, getSerializedNodesWasCalled) + assert.True(t, sendWasCalled) +} + +func TestTrieNodeResolver_ProcessReceivedMessageShouldGetFromTrieAndMarshalizerFailShouldRetNilAndErr(t *testing.T) { + t.Parallel() + + errExpected := errors.New("MarshalizerMock generic error") + marshalizerMock := &mock.MarshalizerMock{} + marshalizerStub := &mock.MarshalizerStub{ + MarshalCalled: func(obj interface{}) (i []byte, e error) { + return nil, errExpected + }, + UnmarshalCalled: func(obj interface{}, buff []byte) error { + return marshalizerMock.Unmarshal(obj, buff) + }, + } + + tnRes, _ := NewTrieNodeResolver( + &mock.TopicResolverSenderStub{}, + &mock.TrieStub{}, + marshalizerStub, + ) + + data, _ := marshalizerMock.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: []byte("node1")}) + msg := &mock.P2PMessageMock{DataField: data} + + err := tnRes.ProcessReceivedMessage(msg, nil) + assert.Equal(t, errExpected, err) +} + +//------- 
RequestTransactionFromHash + +func TestTrieNodeResolver_RequestDataFromHashShouldWork(t *testing.T) { + t.Parallel() + + requested := &dataRetriever.RequestData{} + + res := &mock.TopicResolverSenderStub{} + res.SendOnRequestTopicCalled = func(rd *dataRetriever.RequestData) error { + requested = rd + return nil + } + + buffRequested := []byte("node1") + + tnRes, _ := NewTrieNodeResolver( + res, + &mock.TrieStub{}, + &mock.MarshalizerMock{}, + ) + + assert.Nil(t, tnRes.RequestDataFromHash(buffRequested)) + assert.Equal(t, &dataRetriever.RequestData{ + Type: dataRetriever.HashType, + Value: buffRequested, + }, requested) + +} diff --git a/dataRetriever/shardedData/shardedData.go b/dataRetriever/shardedData/shardedData.go index af5e198eb55..3bfb08660e4 100644 --- a/dataRetriever/shardedData/shardedData.go +++ b/dataRetriever/shardedData/shardedData.go @@ -194,21 +194,6 @@ func (sd *shardedData) MergeShardStores(sourceCacheId, destCacheId string) { sd.mutShardedDataStore.Unlock() } -// MoveData will move all given data associated with the sourceCacheId to the destCacheId -func (sd *shardedData) MoveData(sourceCacheId, destCacheId string, key [][]byte) { - sourceStore := sd.ShardDataStore(sourceCacheId) - - if sourceStore != nil { - for _, key := range key { - val, ok := sourceStore.Get(key) - if ok { - sd.AddData(key, val, destCacheId) - sd.RemoveData(key, sourceCacheId) - } - } - } -} - // Clear will delete all shard stores and associated data func (sd *shardedData) Clear() { sd.mutShardedDataStore.Lock() diff --git a/dataRetriever/shardedData/shardedData_test.go b/dataRetriever/shardedData/shardedData_test.go index ef8919866ff..63ad1306398 100644 --- a/dataRetriever/shardedData/shardedData_test.go +++ b/dataRetriever/shardedData/shardedData_test.go @@ -182,26 +182,6 @@ func TestShardedData_MergeShardStores(t *testing.T) { assert.Nil(t, sd.ShardDataStore("1")) } -func TestShardedData_MoveData(t *testing.T) { - t.Parallel() - - sd, _ := shardedData.NewShardedData(defaultTestConfig) - - sd.AddData([]byte("tx_hash1"), &transaction.Transaction{Nonce: 1}, "1") - sd.AddData([]byte("tx_hash2"), &transaction.Transaction{Nonce: 2}, "2") - sd.AddData([]byte("tx_hash3"), &transaction.Transaction{Nonce: 3}, "2") - sd.AddData([]byte("tx_hash4"), &transaction.Transaction{Nonce: 4}, "2") - sd.AddData([]byte("tx_hash5"), &transaction.Transaction{Nonce: 5}, "2") - sd.AddData([]byte("tx_hash6"), &transaction.Transaction{Nonce: 6}, "2") - - sd.MoveData("2", "3", [][]byte{[]byte("tx_hash5"), []byte("tx_hash6")}) - - assert.Equal(t, 3, sd.ShardDataStore("2").Len(), - "Mini pool for shard 2 should have 3 elements") - assert.Equal(t, 2, sd.ShardDataStore("3").Len(), - "Mini pool for shard 3 should have 2 elements") -} - func TestShardedData_RegisterAddedDataHandlerNilHandlerShouldIgnore(t *testing.T) { t.Parallel() diff --git a/dataRetriever/txpool/shardedTxPool.go b/dataRetriever/txpool/shardedTxPool.go new file mode 100644 index 00000000000..c699a0f911b --- /dev/null +++ b/dataRetriever/txpool/shardedTxPool.go @@ -0,0 +1,253 @@ +package txpool + +import ( + "sync" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/logger" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + "github.com/ElrondNetwork/elrond-go/storage/txcache" +) + +var log = logger.GetOrCreate("dataretriever/txpool") + +// shardedTxPool holds transaction caches 
organised by destination shard +type shardedTxPool struct { + mutex sync.RWMutex + backingMap map[string]*txPoolShard + mutexAddCallbacks sync.RWMutex + onAddCallbacks []func(key []byte) + cacheConfig storageUnit.CacheConfig + evictionConfig txcache.EvictionConfig +} + +type txPoolShard struct { + CacheID string + Cache *txcache.TxCache +} + +// NewShardedTxPool creates a new sharded tx pool +// Implements "dataRetriever.TxPool" +func NewShardedTxPool(config storageUnit.CacheConfig) (dataRetriever.ShardedDataCacherNotifier, error) { + err := verifyConfig(config) + if err != nil { + return nil, err + } + + size := config.Size + evictionConfig := txcache.EvictionConfig{ + Enabled: true, + CountThreshold: size, + ThresholdEvictSenders: process.TxPoolThresholdEvictSenders, + NumOldestSendersToEvict: process.TxPoolNumOldestSendersToEvict, + ALotOfTransactionsForASender: process.TxPoolALotOfTransactionsForASender, + NumTxsToEvictForASenderWithALot: process.TxPoolNumTxsToEvictForASenderWithALot, + } + + shardedTxPool := &shardedTxPool{ + mutex: sync.RWMutex{}, + backingMap: make(map[string]*txPoolShard), + mutexAddCallbacks: sync.RWMutex{}, + onAddCallbacks: make([]func(key []byte), 0), + cacheConfig: config, + evictionConfig: evictionConfig, + } + + return shardedTxPool, nil +} + +func verifyConfig(config storageUnit.CacheConfig) error { + if config.Size < 1 { + return dataRetriever.ErrCacheConfigInvalidSize + } + if config.Shards < 1 { + return dataRetriever.ErrCacheConfigInvalidShards + } + + return nil +} + +// ShardDataStore returns the requested cache, as the generic Cacher interface +func (txPool *shardedTxPool) ShardDataStore(cacheID string) storage.Cacher { + cache := txPool.getTxCache(cacheID) + return cache +} + +// getTxCache returns the requested cache +func (txPool *shardedTxPool) getTxCache(cacheID string) *txcache.TxCache { + shard := txPool.getOrCreateShard(cacheID) + return shard.Cache +} + +func (txPool *shardedTxPool) getOrCreateShard(cacheID string) *txPoolShard { + txPool.mutex.RLock() + shard, ok := txPool.backingMap[cacheID] + txPool.mutex.RUnlock() + + if ok { + return shard + } + + shard = txPool.createShard(cacheID) + return shard +} + +func (txPool *shardedTxPool) createShard(cacheID string) *txPoolShard { + txPool.mutex.Lock() + defer txPool.mutex.Unlock() + + shard, ok := txPool.backingMap[cacheID] + if !ok { + nChunksHint := txPool.cacheConfig.Shards + evictionConfig := txPool.evictionConfig + cache := txcache.NewTxCacheWithEviction(nChunksHint, evictionConfig) + shard = &txPoolShard{ + CacheID: cacheID, + Cache: cache, + } + + txPool.backingMap[cacheID] = shard + } + + return shard +} + +// AddData adds the transaction to the cache +func (txPool *shardedTxPool) AddData(key []byte, value interface{}, cacheID string) { + valueAsTransaction, ok := value.(data.TransactionHandler) + if !ok { + return + } + + txPool.addTx(key, valueAsTransaction, cacheID) +} + +// addTx adds the transaction to the cache +func (txPool *shardedTxPool) addTx(txHash []byte, tx data.TransactionHandler, cacheID string) { + shard := txPool.getOrCreateShard(cacheID) + cache := shard.Cache + _, added := cache.AddTx(txHash, tx) + if added { + txPool.onAdded(txHash) + } +} + +func (txPool *shardedTxPool) onAdded(txHash []byte) { + txPool.mutexAddCallbacks.RLock() + defer txPool.mutexAddCallbacks.RUnlock() + + for _, handler := range txPool.onAddCallbacks { + go handler(txHash) + } +} + +// SearchFirstData searches the transaction against all shard data store, retrieving the first found +func 
(txPool *shardedTxPool) SearchFirstData(key []byte) (interface{}, bool) { + tx, ok := txPool.searchFirstTx(key) + return tx, ok +} + +// searchFirstTx searches the transaction against all shard data store, retrieving the first found +func (txPool *shardedTxPool) searchFirstTx(txHash []byte) (tx data.TransactionHandler, ok bool) { + txPool.mutex.RLock() + defer txPool.mutex.RUnlock() + + for _, shard := range txPool.backingMap { + tx, ok := shard.Cache.GetByTxHash(txHash) + if ok { + return tx, ok + } + } + + return nil, false +} + +// RemoveData removes the transaction from the pool +func (txPool *shardedTxPool) RemoveData(key []byte, cacheID string) { + txPool.removeTx(key, cacheID) +} + +// removeTx removes the transaction from the pool +func (txPool *shardedTxPool) removeTx(txHash []byte, cacheID string) { + shard := txPool.getOrCreateShard(cacheID) + _ = shard.Cache.RemoveTxByHash(txHash) +} + +// RemoveSetOfDataFromPool removes a set of transactions from the pool +func (txPool *shardedTxPool) RemoveSetOfDataFromPool(keys [][]byte, cacheID string) { + txPool.removeTxBulk(keys, cacheID) +} + +// removeTxBulk removes a set of transactions from the pool +func (txPool *shardedTxPool) removeTxBulk(txHashes [][]byte, cacheID string) { + for _, key := range txHashes { + txPool.removeTx(key, cacheID) + } +} + +// RemoveDataFromAllShards removes the transaction from the pool (it searches in all shards) +func (txPool *shardedTxPool) RemoveDataFromAllShards(key []byte) { + txPool.removeTxFromAllShards(key) +} + +// removeTxFromAllShards removes the transaction from the pool (it searches in all shards) +func (txPool *shardedTxPool) removeTxFromAllShards(txHash []byte) { + txPool.mutex.RLock() + defer txPool.mutex.RUnlock() + + for _, shard := range txPool.backingMap { + cache := shard.Cache + _ = cache.RemoveTxByHash(txHash) + } +} + +// MergeShardStores merges two shards of the pool +func (txPool *shardedTxPool) MergeShardStores(sourceCacheID, destCacheID string) { + sourceShard := txPool.getOrCreateShard(sourceCacheID) + sourceCache := sourceShard.Cache + + sourceCache.ForEachTransaction(func(txHash []byte, tx data.TransactionHandler) { + txPool.addTx(txHash, tx, destCacheID) + }) + + txPool.mutex.Lock() + delete(txPool.backingMap, sourceCacheID) + txPool.mutex.Unlock() +} + +// Clear clears everything in the pool +func (txPool *shardedTxPool) Clear() { + txPool.mutex.Lock() + txPool.backingMap = make(map[string]*txPoolShard) + txPool.mutex.Unlock() +} + +// ClearShardStore clears a specific cache +func (txPool *shardedTxPool) ClearShardStore(cacheID string) { + shard := txPool.getOrCreateShard(cacheID) + shard.Cache.Clear() +} + +// CreateShardStore is not implemented for this pool, since shard creation is managed internally +func (txPool *shardedTxPool) CreateShardStore(cacheID string) { +} + +// RegisterHandler registers a new handler to be called when a new transaction is added +func (txPool *shardedTxPool) RegisterHandler(handler func(key []byte)) { + if handler == nil { + log.Error("attempt to register a nil handler") + return + } + + txPool.mutexAddCallbacks.Lock() + txPool.onAddCallbacks = append(txPool.onAddCallbacks, handler) + txPool.mutexAddCallbacks.Unlock() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (txPool *shardedTxPool) IsInterfaceNil() bool { + return txPool == nil +} diff --git a/dataRetriever/txpool/shardedTxPool_test.go b/dataRetriever/txpool/shardedTxPool_test.go new file mode 100644 index 00000000000..80a95e39f08 --- /dev/null
+++ b/dataRetriever/txpool/shardedTxPool_test.go @@ -0,0 +1,270 @@ +package txpool + +import ( + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + "github.com/stretchr/testify/require" +) + +func Test_NewShardedTxPool(t *testing.T) { + pool, err := NewShardedTxPool(storageUnit.CacheConfig{Size: 1, Shards: 1}) + + require.Nil(t, err) + require.NotNil(t, pool) + require.Implements(t, (*dataRetriever.ShardedDataCacherNotifier)(nil), pool) +} + +func Test_NewShardedTxPool_WhenBadConfig(t *testing.T) { + pool, err := NewShardedTxPool(storageUnit.CacheConfig{Size: 1}) + require.Nil(t, pool) + require.NotNil(t, err) + require.Equal(t, dataRetriever.ErrCacheConfigInvalidShards, err) + + pool, err = NewShardedTxPool(storageUnit.CacheConfig{Shards: 1}) + require.Nil(t, pool) + require.NotNil(t, err) + require.Equal(t, err, dataRetriever.ErrCacheConfigInvalidSize) +} + +func Test_NewShardedTxPool_ComputesEvictionConfig(t *testing.T) { + poolAsInterface, err := NewShardedTxPool(storageUnit.CacheConfig{Size: 75000, Shards: 1}) + require.Nil(t, err) + + pool := poolAsInterface.(*shardedTxPool) + + require.Equal(t, true, pool.evictionConfig.Enabled) + require.Equal(t, uint32(75000), pool.evictionConfig.CountThreshold) + require.Equal(t, uint32(1000), pool.evictionConfig.ThresholdEvictSenders) + require.Equal(t, uint32(500), pool.evictionConfig.NumOldestSendersToEvict) + require.Equal(t, uint32(500), pool.evictionConfig.ALotOfTransactionsForASender) + require.Equal(t, uint32(100), pool.evictionConfig.NumTxsToEvictForASenderWithALot) +} + +func Test_ShardDataStore_Or_GetTxCache(t *testing.T) { + poolAsInterface, _ := NewShardedTxPool(storageUnit.CacheConfig{Size: 75000, Shards: 16}) + pool := poolAsInterface.(*shardedTxPool) + + fooGenericCache := pool.ShardDataStore("foo") + fooTxCache := pool.getTxCache("foo") + require.Equal(t, fooGenericCache, fooTxCache) +} + +func Test_ShardDataStore_CreatesIfMissingWithoutConcurrencyIssues(t *testing.T) { + poolAsInterface, _ := NewShardedTxPool(storageUnit.CacheConfig{Size: 75000, Shards: 16}) + pool := poolAsInterface.(*shardedTxPool) + + var wg sync.WaitGroup + + // 100 * 100 caches will be created + + for i := 0; i < 100; i++ { + wg.Add(1) + + go func(i int) { + for j := 0; j < 100; j++ { + pool.ShardDataStore(fmt.Sprintf("%d_%d", i, j)) + } + + wg.Done() + }(i) + } + + wg.Wait() + + require.Equal(t, 10000, len(pool.backingMap)) + + for i := 0; i < 100; i++ { + for j := 0; j < 100; j++ { + _, inMap := pool.backingMap[fmt.Sprintf("%d_%d", i, j)] + require.True(t, inMap) + } + } +} + +func Test_AddData(t *testing.T) { + poolAsInterface, _ := NewShardedTxPool(storageUnit.CacheConfig{Size: 75000, Shards: 16}) + pool := poolAsInterface.(*shardedTxPool) + cache := pool.getTxCache("1") + + pool.AddData([]byte("hash-x"), createTx("alice", 42), "1") + pool.AddData([]byte("hash-y"), createTx("alice", 43), "1") + require.Equal(t, int64(2), cache.CountTx()) + + // Try to add again, duplication does not occur + pool.AddData([]byte("hash-x"), createTx("alice", 42), "1") + require.Equal(t, int64(2), cache.CountTx()) + + _, ok := cache.GetByTxHash([]byte("hash-x")) + require.True(t, ok) + _, ok = cache.GetByTxHash([]byte("hash-y")) + require.True(t, ok) +} + +func Test_AddData_NoPanic_IfNotATransaction(t 
*testing.T) { + poolAsInterface, _ := NewShardedTxPool(storageUnit.CacheConfig{Size: 100, Shards: 1}) + + require.NotPanics(t, func() { + poolAsInterface.AddData([]byte("hash"), &thisIsNotATransaction{}, "1") + }) +} + +func Test_AddData_CallsOnAddedHandlers(t *testing.T) { + poolAsInterface, _ := NewShardedTxPool(storageUnit.CacheConfig{Size: 75000, Shards: 1}) + pool := poolAsInterface.(*shardedTxPool) + + numAdded := uint32(0) + pool.RegisterHandler(func(key []byte) { + atomic.AddUint32(&numAdded, 1) + }) + + // Second addition is ignored (txhash-based deduplication) + pool.AddData([]byte("hash-1"), createTx("alice", 42), "1") + pool.AddData([]byte("hash-1"), createTx("whatever", 43), "1") + + waitABit() + require.Equal(t, uint32(1), atomic.LoadUint32(&numAdded)) +} + +func Test_SearchFirstData(t *testing.T) { + poolAsInterface, _ := NewShardedTxPool(storageUnit.CacheConfig{Size: 75000, Shards: 1}) + pool := poolAsInterface.(*shardedTxPool) + + tx := createTx("alice", 42) + pool.AddData([]byte("hash-x"), tx, "1") + + foundTx, ok := pool.SearchFirstData([]byte("hash-x")) + require.True(t, ok) + require.Equal(t, tx, foundTx) +} + +func Test_RemoveData(t *testing.T) { + poolAsInterface, _ := NewShardedTxPool(storageUnit.CacheConfig{Size: 75000, Shards: 1}) + pool := poolAsInterface.(*shardedTxPool) + + pool.AddData([]byte("hash-x"), createTx("alice", 42), "foo") + pool.AddData([]byte("hash-y"), createTx("bob", 43), "bar") + + pool.RemoveData([]byte("hash-x"), "foo") + pool.RemoveData([]byte("hash-y"), "bar") + xTx, xOk := pool.searchFirstTx([]byte("hash-x")) + yTx, yOk := pool.searchFirstTx([]byte("hash-y")) + require.False(t, xOk) + require.False(t, yOk) + require.Nil(t, xTx) + require.Nil(t, yTx) +} + +func Test_RemoveSetOfDataFromPool(t *testing.T) { + poolAsInterface, _ := NewShardedTxPool(storageUnit.CacheConfig{Size: 75000, Shards: 1}) + pool := poolAsInterface.(*shardedTxPool) + cache := pool.getTxCache("foo") + + pool.AddData([]byte("hash-x"), createTx("alice", 42), "foo") + pool.AddData([]byte("hash-y"), createTx("bob", 43), "foo") + require.Equal(t, int64(2), cache.CountTx()) + + pool.RemoveSetOfDataFromPool([][]byte{[]byte("hash-x"), []byte("hash-y")}, "foo") + require.Zero(t, cache.CountTx()) +} + +func Test_RemoveDataFromAllShards(t *testing.T) { + poolAsInterface, _ := NewShardedTxPool(storageUnit.CacheConfig{Size: 75000, Shards: 1}) + pool := poolAsInterface.(*shardedTxPool) + + pool.AddData([]byte("hash-x"), createTx("alice", 42), "foo") + pool.AddData([]byte("hash-x"), createTx("alice", 42), "bar") + pool.RemoveDataFromAllShards([]byte("hash-x")) + + require.Zero(t, pool.getTxCache("foo").CountTx()) + require.Zero(t, pool.getTxCache("bar").CountTx()) +} + +func Test_MergeShardStores(t *testing.T) { + poolAsInterface, _ := NewShardedTxPool(storageUnit.CacheConfig{Size: 75000, Shards: 1}) + pool := poolAsInterface.(*shardedTxPool) + + pool.AddData([]byte("hash-x"), createTx("alice", 42), "foo") + pool.AddData([]byte("hash-y"), createTx("alice", 43), "bar") + pool.MergeShardStores("foo", "bar") + + require.Equal(t, int64(0), pool.getTxCache("foo").CountTx()) + require.Equal(t, int64(2), pool.getTxCache("bar").CountTx()) +} + +func Test_Clear(t *testing.T) { + poolAsInterface, _ := NewShardedTxPool(storageUnit.CacheConfig{Size: 75000, Shards: 1}) + pool := poolAsInterface.(*shardedTxPool) + + pool.AddData([]byte("hash-x"), createTx("alice", 42), "foo") + pool.AddData([]byte("hash-y"), createTx("alice", 43), "bar") + + pool.Clear() + require.Zero(t, 
pool.getTxCache("foo").CountTx()) + require.Zero(t, pool.getTxCache("bar").CountTx()) +} + +func Test_CreateShardStore(t *testing.T) { + poolAsInterface, _ := NewShardedTxPool(storageUnit.CacheConfig{Size: 75000, Shards: 1}) + pool := poolAsInterface.(*shardedTxPool) + + pool.AddData([]byte("hash-x"), createTx("alice", 42), "foo") + pool.AddData([]byte("hash-y"), createTx("alice", 43), "foo") + pool.AddData([]byte("hash-z"), createTx("alice", 15), "bar") + + pool.ClearShardStore("foo") + require.Equal(t, int64(0), pool.getTxCache("foo").CountTx()) + require.Equal(t, int64(1), pool.getTxCache("bar").CountTx()) +} + +func Test_RegisterHandler(t *testing.T) { + poolAsInterface, _ := NewShardedTxPool(storageUnit.CacheConfig{Size: 75000, Shards: 1}) + pool := poolAsInterface.(*shardedTxPool) + + pool.RegisterHandler(func(key []byte) {}) + require.Equal(t, 1, len(pool.onAddCallbacks)) + + pool.RegisterHandler(nil) + require.Equal(t, 1, len(pool.onAddCallbacks)) +} + +func Test_IsInterfaceNil(t *testing.T) { + poolAsInterface, _ := NewShardedTxPool(storageUnit.CacheConfig{Size: 75000, Shards: 1}) + require.False(t, check.IfNil(poolAsInterface)) + + makeNil := func() dataRetriever.ShardedDataCacherNotifier { + return nil + } + + thisIsNil := makeNil() + require.True(t, check.IfNil(thisIsNil)) +} + +func Test_NotImplementedFunctions(t *testing.T) { + poolAsInterface, _ := NewShardedTxPool(storageUnit.CacheConfig{Size: 75000, Shards: 1}) + pool := poolAsInterface.(*shardedTxPool) + + require.NotPanics(t, func() { pool.CreateShardStore("foo") }) +} + +func createTx(sender string, nonce uint64) data.TransactionHandler { + return &transaction.Transaction{ + SndAddr: []byte(sender), + Nonce: nonce, + } +} + +func waitABit() { + time.Sleep(10 * time.Millisecond) +} + +type thisIsNotATransaction struct { +} diff --git a/display/tablePrinter.go b/display/tablePrinter.go index c9381fd8210..23f66e09e89 100644 --- a/display/tablePrinter.go +++ b/display/tablePrinter.go @@ -117,36 +117,36 @@ func computeColumnsWidths(header []string, data []*LineData) []int { } func drawHorizontalRule(builder *strings.Builder, columnsWidths []int) { - builder.WriteByte('+') + _ = builder.WriteByte('+') for i := 0; i < len(columnsWidths); i++ { for j := 0; j < columnsWidths[i]+2; j++ { - builder.WriteByte('-') + _ = builder.WriteByte('-') } - builder.WriteByte('+') + _ = builder.WriteByte('+') } - builder.Write([]byte{'\r', '\n'}) + _, _ = builder.Write([]byte{'\r', '\n'}) } func drawLine(builder *strings.Builder, columnsWidths []int, strings []string) { - builder.WriteByte('|') + _ = builder.WriteByte('|') for i := 0; i < len(columnsWidths); i++ { - builder.WriteByte(' ') + _ = builder.WriteByte(' ') lenStr := 0 if i < len(strings) { lenStr = utf8.RuneCountInString(strings[i]) - builder.WriteString(strings[i]) + _, _ = builder.WriteString(strings[i]) } for j := lenStr; j < columnsWidths[i]; j++ { - builder.WriteByte(' ') + _ = builder.WriteByte(' ') } - builder.Write([]byte{' ', '|'}) + _, _ = builder.Write([]byte{' ', '|'}) } - builder.Write([]byte{'\r', '\n'}) + _, _ = builder.Write([]byte{'\r', '\n'}) } diff --git a/epochStart/errors.go b/epochStart/errors.go new file mode 100644 index 00000000000..aba17c63ea4 --- /dev/null +++ b/epochStart/errors.go @@ -0,0 +1,84 @@ +package epochStart + +import "errors" + +// ErrNilArgsNewMetaEpochStartTrigger signals that nil arguments were provided +var ErrNilArgsNewMetaEpochStartTrigger = errors.New("nil arguments for meta start of epoch trigger") + +// ErrNilEpochStartSettings signals 
that nil start of epoch settings has been provided +var ErrNilEpochStartSettings = errors.New("nil start of epoch settings") + +// ErrInvalidSettingsForEpochStartTrigger signals that settings for start of epoch trigger are invalid +var ErrInvalidSettingsForEpochStartTrigger = errors.New("invalid start of epoch trigger settings") + +// ErrNilSyncTimer signals that sync timer is nil +var ErrNilSyncTimer = errors.New("nil sync timer") + +// ErrNilArgsNewShardEpochStartTrigger signals that nil arguments for shard epoch trigger has been provided +var ErrNilArgsNewShardEpochStartTrigger = errors.New("nil arguments for shard start of epoch trigger") + +// ErrNilEpochStartNotifier signals that nil epoch start notifier has been provided +var ErrNilEpochStartNotifier = errors.New("nil epoch start notifier") + +// ErrNotEnoughRoundsBetweenEpochs signals that not enough rounds has passed since last epoch start +var ErrNotEnoughRoundsBetweenEpochs = errors.New("tried to force start of epoch before passing of enough rounds") + +// ErrForceEpochStartCanBeCalledOnlyOnNewRound signals that force start of epoch was called on wrong round +var ErrForceEpochStartCanBeCalledOnlyOnNewRound = errors.New("invalid time to call force start of epoch, possible only on new round") + +// ErrSavedRoundIsHigherThanInputRound signals that input round was wrong +var ErrSavedRoundIsHigherThanInputRound = errors.New("saved round is higher than input round") + +// ErrSavedRoundIsHigherThanInput signals that input round was wrong +var ErrSavedRoundIsHigherThanInput = errors.New("saved round is higher than input round") + +// ErrWrongTypeAssertion signals wrong type assertion +var ErrWrongTypeAssertion = errors.New("wrong type assertion") + +// ErrNilMarshalizer signals that nil marshalizer has been provided +var ErrNilMarshalizer = errors.New("nil marshalizer") + +// ErrNilStorage signals that nil storage has been provided +var ErrNilStorage = errors.New("nil storage") + +// ErrNilHeaderHandler signals that a nil header handler has been provided +var ErrNilHeaderHandler = errors.New("nil header handler") + +// ErrNilArgsPendingMiniblocks signals that nil argument was passed +var ErrNilArgsPendingMiniblocks = errors.New("nil arguments for pending miniblock object") + +// ErrMetaHdrNotFound signals that metaheader was not found +var ErrMetaHdrNotFound = errors.New("meta header not found") + +// ErrNilHasher signals that nil hasher has been provided +var ErrNilHasher = errors.New("nil hasher") + +// ErrNilHeaderValidator signals that nil header validator has been provided +var ErrNilHeaderValidator = errors.New("nil header validator") + +// ErrNilDataPoolsHolder signals that nil data pools holder has been provided +var ErrNilDataPoolsHolder = errors.New("nil data pools holder") + +// ErrNilStorageService signals that nil storage service has been provided +var ErrNilStorageService = errors.New("nil storage service") + +// ErrNilRequestHandler signals that nil request handler has been provided +var ErrNilRequestHandler = errors.New("nil request handler") + +// ErrNilMetaBlockStorage signals that nil metablocks storage has been provided +var ErrNilMetaBlockStorage = errors.New("nil metablocks storage") + +// ErrNilMetaBlocksPool signals that nil metablock pools holder has been provided +var ErrNilMetaBlocksPool = errors.New("nil metablocks pool") + +// ErrNilHeaderNoncesPool signals that nil header nonces pool has been provided +var ErrNilHeaderNoncesPool = errors.New("nil header nonces pool") + +// ErrNilUint64Converter signals that 
nil uint64 converter has been provided +var ErrNilUint64Converter = errors.New("nil uint64 converter") + +// ErrNilMetaHdrStorage signals that nil meta header storage has been provided +var ErrNilMetaHdrStorage = errors.New("nil meta header storage") + +// ErrNilMetaNonceHashStorage signals that nil meta header nonce hash storage has been provided +var ErrNilMetaNonceHashStorage = errors.New("nil meta nonce hash storage") diff --git a/core/genesis/genesisBlockCreator.go b/epochStart/genesis/genesisBlockCreator.go similarity index 97% rename from core/genesis/genesisBlockCreator.go rename to epochStart/genesis/genesisBlockCreator.go index fb20f470ef8..026e6644ee5 100644 --- a/core/genesis/genesisBlockCreator.go +++ b/epochStart/genesis/genesisBlockCreator.go @@ -287,9 +287,8 @@ func deploySystemSmartContracts( RcvAddr: make([]byte, addrConv.AddressLen()), GasPrice: 0, GasLimit: 0, - Data: hex.EncodeToString([]byte("deploy")) + "@" + hex.EncodeToString(factory.SystemVirtualMachine), + Data: []byte(hex.EncodeToString([]byte("deploy")) + "@" + hex.EncodeToString(factory.SystemVirtualMachine)), Signature: nil, - Challenge: nil, } accountsDB, ok := accounts.(*state.AccountsDB) @@ -341,9 +340,8 @@ func setStakingData( SndAddr: nodeInfo.Address(), GasPrice: 0, GasLimit: 0, - Data: "stake@" + hex.EncodeToString(nodeInfo.PubKey()), + Data: []byte("stake@" + hex.EncodeToString(nodeInfo.PubKey())), Signature: nil, - Challenge: nil, } err := txProcessor.ProcessTransaction(tx) @@ -362,9 +360,8 @@ func setStakingData( SndAddr: nodeInfo.Address(), GasPrice: 0, GasLimit: 0, - Data: "stake@" + hex.EncodeToString(nodeInfo.PubKey()), + Data: []byte("stake@" + hex.EncodeToString(nodeInfo.PubKey())), Signature: nil, - Challenge: nil, } err := txProcessor.ProcessTransaction(tx) diff --git a/core/genesis/genesisBlockCreator_test.go b/epochStart/genesis/genesisBlockCreator_test.go similarity index 99% rename from core/genesis/genesisBlockCreator_test.go rename to epochStart/genesis/genesisBlockCreator_test.go index eb4f7e625d3..20d1f58e739 100644 --- a/core/genesis/genesisBlockCreator_test.go +++ b/epochStart/genesis/genesisBlockCreator_test.go @@ -6,9 +6,9 @@ import ( "math/big" "testing" - "github.com/ElrondNetwork/elrond-go/core/genesis" dataBlock "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/epochStart/genesis" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/stretchr/testify/assert" diff --git a/epochStart/interface.go b/epochStart/interface.go new file mode 100644 index 00000000000..0c74038cac5 --- /dev/null +++ b/epochStart/interface.go @@ -0,0 +1,63 @@ +package epochStart + +import ( + "time" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" +) + +// TriggerHandler defines the functionalities for an start of epoch trigger +type TriggerHandler interface { + ForceEpochStart(round uint64) error + IsEpochStart() bool + Epoch() uint32 + ReceivedHeader(header data.HeaderHandler) + Update(round uint64) + EpochStartRound() uint64 + EpochStartMetaHdrHash() []byte + SetProcessed(header data.HeaderHandler) + SetFinalityAttestingRound(round uint64) + EpochFinalityAttestingRound() uint64 + Revert() + SetCurrentEpochStartRound(round uint64) + IsInterfaceNil() bool +} + +// PendingMiniBlocksHandler defines the actions which should be handled by pending miniblocks implementation +type PendingMiniBlocksHandler interface { + 
PendingMiniBlockHeaders(lastNotarizedHeaders []data.HeaderHandler) ([]block.ShardMiniBlockHeader, error) + AddProcessedHeader(handler data.HeaderHandler) error + RevertHeader(handler data.HeaderHandler) error + IsInterfaceNil() bool +} + +// Rounder defines the actions which should be handled by a round implementation +type Rounder interface { + // Index returns the current round + Index() int64 + // TimeStamp returns the time stamp of the round + TimeStamp() time.Time + IsInterfaceNil() bool +} + +// HeaderValidator defines the actions needed to validate a header +type HeaderValidator interface { + IsHeaderConstructionValid(currHdr, prevHdr data.HeaderHandler) error + IsInterfaceNil() bool +} + +// RequestHandler defines the methods through which request to data can be made +type RequestHandler interface { + RequestShardHeader(shardId uint32, hash []byte) + RequestMetaHeader(hash []byte) + RequestMetaHeaderByNonce(nonce uint64) + RequestShardHeaderByNonce(shardId uint32, nonce uint64) + IsInterfaceNil() bool +} + +// StartOfEpochNotifier defines what triggers should do for subscribed functions +type StartOfEpochNotifier interface { + NotifyAll(hdr data.HeaderHandler) + IsInterfaceNil() bool +} diff --git a/epochStart/metachain/pendingMiniBlocks.go b/epochStart/metachain/pendingMiniBlocks.go new file mode 100644 index 00000000000..69f0b74872b --- /dev/null +++ b/epochStart/metachain/pendingMiniBlocks.go @@ -0,0 +1,259 @@ +package metachain + +import ( + "bytes" + "sort" + "sync" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" +) + +//ArgsPendingMiniBlocks is structure that contain components that are used to create a new pendingMiniBlockHeaders object +type ArgsPendingMiniBlocks struct { + Marshalizer marshal.Marshalizer + Storage storage.Storer + MetaBlockStorage storage.Storer + MetaBlockPool dataRetriever.HeadersPool +} + +type pendingMiniBlockHeaders struct { + marshalizer marshal.Marshalizer + metaBlockStorage storage.Storer + metaBlockPool dataRetriever.HeadersPool + storage storage.Storer + mutPending sync.Mutex + mapMiniBlockHeaders map[string]block.ShardMiniBlockHeader +} + +// NewPendingMiniBlocks will create a new pendingMiniBlockHeaders object +func NewPendingMiniBlocks(args *ArgsPendingMiniBlocks) (*pendingMiniBlockHeaders, error) { + if args == nil { + return nil, epochStart.ErrNilArgsPendingMiniblocks + } + if check.IfNil(args.Marshalizer) { + return nil, epochStart.ErrNilMarshalizer + } + if check.IfNil(args.Storage) { + return nil, epochStart.ErrNilStorage + } + if check.IfNil(args.MetaBlockStorage) { + return nil, epochStart.ErrNilMetaBlockStorage + } + if check.IfNil(args.MetaBlockPool) { + return nil, epochStart.ErrNilMetaBlocksPool + } + + return &pendingMiniBlockHeaders{ + marshalizer: args.Marshalizer, + storage: args.Storage, + mapMiniBlockHeaders: make(map[string]block.ShardMiniBlockHeader), + metaBlockPool: args.MetaBlockPool, + metaBlockStorage: args.MetaBlockStorage, + }, nil +} + +//PendingMiniBlockHeaders will return a sorted list of ShardMiniBlockHeaders +func (p *pendingMiniBlockHeaders) PendingMiniBlockHeaders( + lastNotarizedHeaders []data.HeaderHandler, +) ([]block.ShardMiniBlockHeader, error) { + shardMiniBlockHeaders 
:= make([]block.ShardMiniBlockHeader, 0) + + mapLastUsedMetaBlocks, err := p.getLastUsedMetaBlockFromShardHeaders(lastNotarizedHeaders) + if err != nil { + return nil, err + } + + // make a list map of shardminiblock headers which are in these metablocks + mapShardMiniBlockHeaders := make(map[string]block.ShardMiniBlockHeader) + for _, lastMetaHdr := range mapLastUsedMetaBlocks { + crossShard := p.getAllCrossShardMiniBlocks(lastMetaHdr) + for key, shardMBHeader := range crossShard { + mapShardMiniBlockHeaders[key] = shardMBHeader + } + } + + // pending miniblocks are only those which are still pending and ar from the aforementioned list + p.mutPending.Lock() + defer p.mutPending.Unlock() + + for key, shMbHdr := range p.mapMiniBlockHeaders { + if _, ok := mapShardMiniBlockHeaders[key]; !ok { + continue + } + shardMiniBlockHeaders = append(shardMiniBlockHeaders, shMbHdr) + } + + sort.Slice(shardMiniBlockHeaders, func(i, j int) bool { + return bytes.Compare(shardMiniBlockHeaders[i].Hash, shardMiniBlockHeaders[j].Hash) < 0 + }) + + return shardMiniBlockHeaders, nil +} + +func (p *pendingMiniBlockHeaders) getAllCrossShardMiniBlocks(metaHdr *block.MetaBlock) map[string]block.ShardMiniBlockHeader { + crossShard := make(map[string]block.ShardMiniBlockHeader) + + for _, miniBlockHeader := range metaHdr.MiniBlockHeaders { + if miniBlockHeader.ReceiverShardID != sharding.MetachainShardId { + continue + } + + shardMiniBlockHeader := block.ShardMiniBlockHeader{ + Hash: miniBlockHeader.Hash, + ReceiverShardID: miniBlockHeader.ReceiverShardID, + SenderShardID: miniBlockHeader.SenderShardID, + TxCount: miniBlockHeader.TxCount, + } + crossShard[string(miniBlockHeader.Hash)] = shardMiniBlockHeader + } + + for _, shardData := range metaHdr.ShardInfo { + for _, mbHeader := range shardData.ShardMiniBlockHeaders { + if mbHeader.SenderShardID == mbHeader.ReceiverShardID { + continue + } + if mbHeader.ReceiverShardID == sharding.MetachainShardId { + continue + } + + crossShard[string(mbHeader.Hash)] = mbHeader + } + } + + return crossShard +} + +func (p *pendingMiniBlockHeaders) getLastUsedMetaBlockFromShardHeaders( + lastNotarizedHeaders []data.HeaderHandler, +) (map[string]*block.MetaBlock, error) { + mapLastUsedMetaBlocks := make(map[string]*block.MetaBlock) + for _, header := range lastNotarizedHeaders { + shardHdr, ok := header.(*block.Header) + if !ok { + return nil, epochStart.ErrWrongTypeAssertion + } + + numMetas := len(shardHdr.MetaBlockHashes) + if numMetas == 0 { + continue + } + + lastMetaBlockHash := shardHdr.MetaBlockHashes[numMetas-1] + if _, ok := mapLastUsedMetaBlocks[string(lastMetaBlockHash)]; ok { + continue + } + + lastMetaHdr, err := p.getMetaBlockByHash(lastMetaBlockHash) + if err != nil { + return nil, err + } + + mapLastUsedMetaBlocks[string(lastMetaBlockHash)] = lastMetaHdr + } + + return mapLastUsedMetaBlocks, nil +} + +func (p *pendingMiniBlockHeaders) getMetaBlockByHash(metaHash []byte) (*block.MetaBlock, error) { + peekedData, _ := p.metaBlockPool.GetHeaderByHash(metaHash) + metaHdr, ok := peekedData.(*block.MetaBlock) + if ok { + return metaHdr, nil + } + + buff, err := p.metaBlockStorage.Get(metaHash) + if err != nil { + return nil, err + } + + var metaHeader block.MetaBlock + err = p.marshalizer.Unmarshal(&metaHeader, buff) + if err != nil { + return nil, err + } + + return &metaHeader, nil +} + +// AddProcessedHeader will add all miniblocks headers in a map +func (p *pendingMiniBlockHeaders) AddProcessedHeader(handler data.HeaderHandler) error { + if check.IfNil(handler) { + 
return epochStart.ErrNilHeaderHandler + } + + metaHdr, ok := handler.(*block.MetaBlock) + if !ok { + return epochStart.ErrWrongTypeAssertion + } + + crossShard := p.getAllCrossShardMiniBlocks(metaHdr) + + var err error + p.mutPending.Lock() + defer func() { + p.mutPending.Unlock() + if err != nil { + _ = p.RevertHeader(handler) + } + }() + + for key, mbHeader := range crossShard { + if _, ok = p.mapMiniBlockHeaders[key]; !ok { + p.mapMiniBlockHeaders[key] = mbHeader + continue + } + + delete(p.mapMiniBlockHeaders, key) + + var buff []byte + buff, err = p.marshalizer.Marshal(mbHeader) + if err != nil { + return err + } + + err = p.storage.Put(mbHeader.Hash, buff) + if err != nil { + return err + } + } + + return nil +} + +// RevertHeader will remove all minibloks headers that are in metablock from pending +func (p *pendingMiniBlockHeaders) RevertHeader(handler data.HeaderHandler) error { + if check.IfNil(handler) { + return epochStart.ErrNilHeaderHandler + } + + metaHdr, ok := handler.(*block.MetaBlock) + if !ok { + return epochStart.ErrWrongTypeAssertion + } + + crossShard := p.getAllCrossShardMiniBlocks(metaHdr) + + for mbHash, mbHeader := range crossShard { + if _, ok = p.mapMiniBlockHeaders[mbHash]; ok { + delete(p.mapMiniBlockHeaders, mbHash) + continue + } + + _ = p.storage.Remove([]byte(mbHash)) + p.mapMiniBlockHeaders[mbHash] = mbHeader + } + + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (p *pendingMiniBlockHeaders) IsInterfaceNil() bool { + return p == nil +} diff --git a/epochStart/metachain/pendingMiniBlocks_test.go b/epochStart/metachain/pendingMiniBlocks_test.go new file mode 100644 index 00000000000..333de2ddb56 --- /dev/null +++ b/epochStart/metachain/pendingMiniBlocks_test.go @@ -0,0 +1,259 @@ +package metachain + +import ( + "bytes" + "testing" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/epochStart/mock" + "github.com/stretchr/testify/assert" +) + +func createMockArguments() *ArgsPendingMiniBlocks { + return &ArgsPendingMiniBlocks{ + Marshalizer: &mock.MarshalizerMock{}, + Storage: &mock.StorerStub{}, + MetaBlockStorage: &mock.StorerStub{ + GetCalled: func(key []byte) (i []byte, e error) { + return nil, epochStart.ErrMetaHdrNotFound + }, + }, + MetaBlockPool: &mock.HeadersCacherStub{}, + } +} + +func isMbInSlice(hash []byte, shdMbHdrs []block.ShardMiniBlockHeader) bool { + for _, shdMbHdr := range shdMbHdrs { + if bytes.Equal(shdMbHdr.Hash, hash) { + return true + } + } + return false +} + +func TestNewPendingMiniBlocks_NilArgumentsShouldErr(t *testing.T) { + t.Parallel() + + pmb, err := NewPendingMiniBlocks(nil) + + assert.Nil(t, pmb) + assert.Equal(t, epochStart.ErrNilArgsPendingMiniblocks, err) +} + +func TestNewPendingMiniBlocks_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockArguments() + arguments.Marshalizer = nil + + pmb, err := NewPendingMiniBlocks(arguments) + assert.Nil(t, pmb) + assert.Equal(t, epochStart.ErrNilMarshalizer, err) +} + +func TestNewPendingMiniBlocks_NilStorageShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockArguments() + arguments.Storage = nil + + pmb, err := NewPendingMiniBlocks(arguments) + assert.Nil(t, pmb) + assert.Equal(t, epochStart.ErrNilStorage, err) +} + +func TestNewPendingMiniBlocks_ShouldWork(t *testing.T) { + t.Parallel() + + arguments := createMockArguments() + + pmb, err := 
NewPendingMiniBlocks(arguments) + assert.NotNil(t, pmb) + assert.Nil(t, err) +} + +func TestPendingMiniBlockHeaders_AddCommittedHeaderNilHeaderShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockArguments() + pmb, _ := NewPendingMiniBlocks(arguments) + + err := pmb.AddProcessedHeader(nil) + assert.Equal(t, epochStart.ErrNilHeaderHandler, err) +} + +func TestPendingMiniBlockHeaders_AddProcessedHeaderWrongHeaderShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockArguments() + pmb, _ := NewPendingMiniBlocks(arguments) + header := &block.Header{} + + err := pmb.AddProcessedHeader(header) + assert.Equal(t, epochStart.ErrWrongTypeAssertion, err) +} + +func TestPendingMiniBlockHeaders_AddProcessedHeader(t *testing.T) { + t.Parallel() + + hash1 := []byte("hash1") + hash2 := []byte("hash2") + + arguments := createMockArguments() + arguments.Storage = &mock.StorerStub{ + PutCalled: func(key, data []byte) error { + return nil + }, + } + + header := &block.MetaBlock{ + ShardInfo: []block.ShardData{ + {ShardMiniBlockHeaders: []block.ShardMiniBlockHeader{ + {Hash: hash1, SenderShardID: 1}, + {Hash: hash2, SenderShardID: 1}, + }}, + }, + } + arguments.MetaBlockPool = &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (handler data.HeaderHandler, e error) { + return header, nil + }, + } + + shardHeader := &block.Header{MetaBlockHashes: [][]byte{[]byte("metaHash")}} + + pmb, _ := NewPendingMiniBlocks(arguments) + err := pmb.AddProcessedHeader(header) + assert.Nil(t, err) + + //Check miniblocks headers are returned + shdMbHdrs, err := pmb.PendingMiniBlockHeaders([]data.HeaderHandler{shardHeader}) + assert.True(t, isMbInSlice(hash1, shdMbHdrs)) + assert.True(t, isMbInSlice(hash2, shdMbHdrs)) + assert.Nil(t, err) + + err = pmb.AddProcessedHeader(header) + assert.Nil(t, err) + + //Check miniblocks headers are removed from pending list + shdMbHdrs, err = pmb.PendingMiniBlockHeaders([]data.HeaderHandler{shardHeader}) + assert.False(t, isMbInSlice(hash1, shdMbHdrs)) + assert.False(t, isMbInSlice(hash2, shdMbHdrs)) + assert.Nil(t, err) +} + +func TestPendingMiniBlockHeaders_PendingMiniBlockHeaders(t *testing.T) { + t.Parallel() + + hash1 := []byte("hash1") + hash2 := []byte("hash2") + hash3 := []byte("hash3") + hash4 := []byte("hash4") + hash5 := []byte("hash5") + + arguments := createMockArguments() + arguments.Storage = &mock.StorerStub{ + PutCalled: func(key, data []byte) error { + return nil + }, + } + header := &block.MetaBlock{ + ShardInfo: []block.ShardData{ + {ShardMiniBlockHeaders: []block.ShardMiniBlockHeader{ + {Hash: hash1, SenderShardID: 1, TxCount: 5}, + {Hash: hash2, SenderShardID: 1, TxCount: 5}, + {Hash: hash3, SenderShardID: 1, TxCount: 5}, + {Hash: hash4, SenderShardID: 1, TxCount: 5}, + {Hash: hash5, SenderShardID: 1, TxCount: 5}, + }}, + }, + } + arguments.MetaBlockPool = &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (handler data.HeaderHandler, e error) { + return header, nil + }, + } + shardHeader := &block.Header{MetaBlockHashes: [][]byte{[]byte("metaHash")}} + + pmb, _ := NewPendingMiniBlocks(arguments) + err := pmb.AddProcessedHeader(header) + assert.Nil(t, err) + + //Check miniblocks headers are returned + shdMbHdrs, _ := pmb.PendingMiniBlockHeaders([]data.HeaderHandler{shardHeader}) + assert.Equal(t, len(shdMbHdrs), len(header.ShardInfo[0].ShardMiniBlockHeaders)) +} + +func TestPendingMiniBlockHeaders_AddProcessedHeaderCannotMarshalShouldRevert(t *testing.T) { + t.Parallel() + + hash1 := []byte("hash1") + hash2 := 
[]byte("hash2") + + arguments := createMockArguments() + arguments.Storage = &mock.StorerStub{ + PutCalled: func(key, data []byte) error { + return nil + }, + RemoveCalled: func(key []byte) error { + return nil + }, + } + arguments.Marshalizer = &mock.MarshalizerMock{ + Fail: true, + } + + header := &block.MetaBlock{ + ShardInfo: []block.ShardData{ + {ShardMiniBlockHeaders: []block.ShardMiniBlockHeader{ + {Hash: hash1, SenderShardID: 1}, + {Hash: hash2, SenderShardID: 1}, + }}, + }, + } + arguments.MetaBlockPool = &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (handler data.HeaderHandler, e error) { + return header, nil + }, + } + shardHeader := &block.Header{MetaBlockHashes: [][]byte{[]byte("metaHash")}} + + pmb, _ := NewPendingMiniBlocks(arguments) + err := pmb.AddProcessedHeader(header) + assert.Nil(t, err) + + shdMbHdrs, _ := pmb.PendingMiniBlockHeaders([]data.HeaderHandler{shardHeader}) + assert.True(t, isMbInSlice(hash1, shdMbHdrs)) + assert.True(t, isMbInSlice(hash2, shdMbHdrs)) + + err = pmb.AddProcessedHeader(header) + assert.NotNil(t, err) + + //Check miniblocks headers are not removed from pending list + shdMbHdrs, _ = pmb.PendingMiniBlockHeaders([]data.HeaderHandler{shardHeader}) + assert.True(t, isMbInSlice(hash1, shdMbHdrs)) + assert.False(t, isMbInSlice(hash2, shdMbHdrs)) +} + +func TestPendingMiniBlockHeaders_RevertHeaderNilHeaderShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockArguments() + pmb, _ := NewPendingMiniBlocks(arguments) + + err := pmb.RevertHeader(nil) + assert.Equal(t, epochStart.ErrNilHeaderHandler, err) +} + +func TestPendingMiniBlockHeaders_RevertHeaderWrongHeaderTypeShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockArguments() + pmb, _ := NewPendingMiniBlocks(arguments) + header := &block.Header{} + + err := pmb.RevertHeader(header) + assert.Equal(t, epochStart.ErrWrongTypeAssertion, err) +} diff --git a/epochStart/metachain/testTrigger.go b/epochStart/metachain/testTrigger.go new file mode 100644 index 00000000000..92df87cd508 --- /dev/null +++ b/epochStart/metachain/testTrigger.go @@ -0,0 +1,33 @@ +package metachain + +import "github.com/ElrondNetwork/elrond-go/epochStart" + +// TestTrigger extends start of epoch trigger and is used in integration tests as it exposes some functions +// that are not supposed to be used in production code +// Exported functions simplify the reproduction of edge cases +type TestTrigger struct { + *trigger +} + +// SetTrigger sets the start of epoch trigger +func (t *TestTrigger) SetTrigger(triggerHandler epochStart.TriggerHandler) { + actualTrigger, ok := triggerHandler.(*trigger) + if !ok { + return + } + + t.trigger = actualTrigger +} + +// SetRoundsPerEpoch sets the number of round between epochs +func (t *TestTrigger) SetRoundsPerEpoch(roundsPerEpoch uint64) { + t.roundsPerEpoch = roundsPerEpoch + if t.minRoundsBetweenEpochs > t.roundsPerEpoch { + t.minRoundsBetweenEpochs = t.roundsPerEpoch - 1 + } +} + +// GetRoundsPerEpoch gets the number of rounds per epoch +func (t *TestTrigger) GetRoundsPerEpoch() uint64 { + return t.roundsPerEpoch +} diff --git a/epochStart/metachain/trigger.go b/epochStart/metachain/trigger.go new file mode 100644 index 00000000000..f7f9bafde63 --- /dev/null +++ b/epochStart/metachain/trigger.go @@ -0,0 +1,256 @@ +package metachain + +import ( + "sync" + "time" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" + 
"github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/logger" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/storage" +) + +var log = logger.GetOrCreate("epochStart/metachain") + +// ArgsNewMetaEpochStartTrigger defines struct needed to create a new start of epoch trigger +type ArgsNewMetaEpochStartTrigger struct { + GenesisTime time.Time + Settings *config.EpochStartConfig + Epoch uint32 + EpochStartRound uint64 + EpochStartNotifier epochStart.StartOfEpochNotifier + Marshalizer marshal.Marshalizer + Storage dataRetriever.StorageService +} + +type trigger struct { + isEpochStart bool + epoch uint32 + currentRound uint64 + epochFinalityAttestingRound uint64 + currEpochStartRound uint64 + prevEpochStartRound uint64 + roundsPerEpoch uint64 + minRoundsBetweenEpochs uint64 + epochStartMetaHash []byte + epochStartTime time.Time + mutTrigger sync.RWMutex + epochStartNotifier epochStart.StartOfEpochNotifier + metaHdrStorage storage.Storer + marshalizer marshal.Marshalizer +} + +// NewEpochStartTrigger creates a trigger for start of epoch +func NewEpochStartTrigger(args *ArgsNewMetaEpochStartTrigger) (*trigger, error) { + if args == nil { + return nil, epochStart.ErrNilArgsNewMetaEpochStartTrigger + } + if args.Settings == nil { + return nil, epochStart.ErrNilEpochStartSettings + } + if args.Settings.RoundsPerEpoch < 1 { + return nil, epochStart.ErrInvalidSettingsForEpochStartTrigger + } + if args.Settings.MinRoundsBetweenEpochs < 1 { + return nil, epochStart.ErrInvalidSettingsForEpochStartTrigger + } + if args.Settings.MinRoundsBetweenEpochs > args.Settings.RoundsPerEpoch { + return nil, epochStart.ErrInvalidSettingsForEpochStartTrigger + } + if check.IfNil(args.EpochStartNotifier) { + return nil, epochStart.ErrNilEpochStartNotifier + } + if check.IfNil(args.Marshalizer) { + return nil, epochStart.ErrNilMarshalizer + } + if check.IfNil(args.Storage) { + return nil, epochStart.ErrNilStorageService + } + + metaHdrStorage := args.Storage.GetStorer(dataRetriever.MetaBlockUnit) + if check.IfNil(metaHdrStorage) { + return nil, epochStart.ErrNilMetaHdrStorage + } + + return &trigger{ + roundsPerEpoch: uint64(args.Settings.RoundsPerEpoch), + epochStartTime: args.GenesisTime, + currEpochStartRound: args.EpochStartRound, + prevEpochStartRound: args.EpochStartRound, + epoch: args.Epoch, + minRoundsBetweenEpochs: uint64(args.Settings.MinRoundsBetweenEpochs), + mutTrigger: sync.RWMutex{}, + epochFinalityAttestingRound: args.EpochStartRound, + epochStartNotifier: args.EpochStartNotifier, + metaHdrStorage: metaHdrStorage, + marshalizer: args.Marshalizer, + }, nil +} + +// IsEpochStart return true if conditions are fulfilled for start of epoch +func (t *trigger) IsEpochStart() bool { + t.mutTrigger.RLock() + defer t.mutTrigger.RUnlock() + + return t.isEpochStart +} + +// EpochStartRound returns the start round of the current epoch +func (t *trigger) EpochStartRound() uint64 { + t.mutTrigger.RLock() + defer t.mutTrigger.RUnlock() + + return t.currEpochStartRound +} + +// EpochFinalityAttestingRound returns the round when epoch start block was finalized +func (t *trigger) EpochFinalityAttestingRound() uint64 { + t.mutTrigger.RLock() + defer t.mutTrigger.RUnlock() + + return t.epochFinalityAttestingRound +} + +// ForceEpochStart sets the conditions for start of epoch to true in case of edge cases +func 
(t *trigger) ForceEpochStart(round uint64) error { + t.mutTrigger.Lock() + defer t.mutTrigger.Unlock() + + if t.currentRound > round { + return epochStart.ErrSavedRoundIsHigherThanInputRound + } + if t.currentRound == round { + return epochStart.ErrForceEpochStartCanBeCalledOnlyOnNewRound + } + + t.currentRound = round + + if t.currentRound-t.currEpochStartRound < t.minRoundsBetweenEpochs { + return epochStart.ErrNotEnoughRoundsBetweenEpochs + } + + if !t.isEpochStart { + t.epoch += 1 + } + + t.currEpochStartRound = t.currentRound + t.isEpochStart = true + + return nil +} + +// Update processes changes in the trigger +func (t *trigger) Update(round uint64) { + t.mutTrigger.Lock() + defer t.mutTrigger.Unlock() + + if t.isEpochStart { + return + } + + if round <= t.currentRound { + return + } + + t.currentRound = round + + if t.currentRound > t.currEpochStartRound+t.roundsPerEpoch { + t.epoch += 1 + t.isEpochStart = true + t.currEpochStartRound = t.currentRound + } +} + +// SetProcessed sets start of epoch to false and cleans underlying structure +func (t *trigger) SetProcessed(header data.HeaderHandler) { + t.mutTrigger.Lock() + defer t.mutTrigger.Unlock() + + metaBlock, ok := header.(*block.MetaBlock) + if !ok { + return + } + if !metaBlock.IsStartOfEpochBlock() { + return + } + + metaBuff, err := t.marshalizer.Marshal(metaBlock) + if err != nil { + log.Debug("SetProcessed marshal", "error", err.Error()) + } + + epochStartIdentifier := core.EpochStartIdentifier(metaBlock.Epoch) + err = t.metaHdrStorage.Put([]byte(epochStartIdentifier), metaBuff) + if err != nil { + log.Debug("SetProcessed put into metaHdrStorage", "error", err.Error()) + } + + t.currEpochStartRound = metaBlock.Round + t.epoch = metaBlock.Epoch + t.epochStartNotifier.NotifyAll(metaBlock) + t.isEpochStart = false +} + +// SetFinalityAttestingRound sets the round which finalized the start of epoch block +func (t *trigger) SetFinalityAttestingRound(round uint64) { + t.mutTrigger.Lock() + defer t.mutTrigger.Unlock() + + if round > t.currEpochStartRound { + t.epochFinalityAttestingRound = round + } +} + +// Revert sets the start of epoch back to true +func (t *trigger) Revert() { + t.mutTrigger.Lock() + defer t.mutTrigger.Unlock() + + epochStartIdentifier := core.EpochStartIdentifier(t.epoch) + err := t.metaHdrStorage.Remove([]byte(epochStartIdentifier)) + if err != nil { + log.Debug("Revert remove from metaHdrStorage", "error", err.Error()) + } + + t.isEpochStart = true +} + +// Epoch returns the current epoch +func (t *trigger) Epoch() uint32 { + t.mutTrigger.RLock() + defer t.mutTrigger.RUnlock() + + return t.epoch +} + +// ReceivedHeader does nothing for the metachain trigger, as the start of epoch is decided by rounds rather than by received headers +func (t *trigger) ReceivedHeader(_ data.HeaderHandler) { +} + +// EpochStartMetaHdrHash returns the announcing meta header hash which created the new epoch +func (t *trigger) EpochStartMetaHdrHash() []byte { + return t.epochStartMetaHash +} + +// SetEpochStartMetaHdrHash sets the epoch start meta header hash +func (t *trigger) SetEpochStartMetaHdrHash(metaHdrHash []byte) { + t.epochStartMetaHash = metaHdrHash +} + +// SetCurrentEpochStartRound sets the round when the current epoch started +func (t *trigger) SetCurrentEpochStartRound(round uint64) { + t.mutTrigger.Lock() + t.currentRound = round + t.mutTrigger.Unlock() +} + +// IsInterfaceNil returns true if underlying object is nil +func (t *trigger) IsInterfaceNil() bool { + return t == nil +} diff --git a/epochStart/metachain/trigger_test.go
b/epochStart/metachain/trigger_test.go new file mode 100644 index 00000000000..f5cd47859d5 --- /dev/null +++ b/epochStart/metachain/trigger_test.go @@ -0,0 +1,205 @@ +package metachain + +import ( + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/epochStart/mock" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/stretchr/testify/assert" +) + +func createMockEpochStartTriggerArguments() *ArgsNewMetaEpochStartTrigger { + return &ArgsNewMetaEpochStartTrigger{ + GenesisTime: time.Time{}, + Settings: &config.EpochStartConfig{ + MinRoundsBetweenEpochs: 1, + RoundsPerEpoch: 2, + }, + Epoch: 0, + EpochStartNotifier: &mock.EpochStartNotifierStub{}, + Marshalizer: &mock.MarshalizerMock{}, + Storage: &mock.ChainStorerStub{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &mock.StorerStub{ + GetCalled: func(key []byte) (bytes []byte, err error) { + return []byte("hash"), nil + }, + PutCalled: func(key, data []byte) error { + return nil + }, + } + }, + }, + } +} + +func TestNewEpochStartTrigger_NilArgumentsShouldErr(t *testing.T) { + t.Parallel() + + epochStartTrigger, err := NewEpochStartTrigger(nil) + + assert.Nil(t, epochStartTrigger) + assert.Equal(t, epochStart.ErrNilArgsNewMetaEpochStartTrigger, err) +} + +func TestNewEpochStartTrigger_NilSettingsShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockEpochStartTriggerArguments() + arguments.Settings = nil + + epochStartTrigger, err := NewEpochStartTrigger(arguments) + assert.Nil(t, epochStartTrigger) + assert.Equal(t, epochStart.ErrNilEpochStartSettings, err) +} + +func TestNewEpochStartTrigger_InvalidSettingsShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockEpochStartTriggerArguments() + arguments.Settings.RoundsPerEpoch = 0 + + epochStartTrigger, err := NewEpochStartTrigger(arguments) + assert.Nil(t, epochStartTrigger) + assert.Equal(t, epochStart.ErrInvalidSettingsForEpochStartTrigger, err) +} + +func TestNewEpochStartTrigger_NilEpochStartNotifierShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockEpochStartTriggerArguments() + arguments.EpochStartNotifier = nil + + epochStartTrigger, err := NewEpochStartTrigger(arguments) + assert.Nil(t, epochStartTrigger) + assert.Equal(t, epochStart.ErrNilEpochStartNotifier, err) +} + +func TestNewEpochStartTrigger_InvalidSettingsShouldErr2(t *testing.T) { + t.Parallel() + + arguments := createMockEpochStartTriggerArguments() + arguments.Settings.RoundsPerEpoch = 1 + arguments.Settings.MinRoundsBetweenEpochs = 0 + + epochStartTrigger, err := NewEpochStartTrigger(arguments) + assert.Nil(t, epochStartTrigger) + assert.Equal(t, epochStart.ErrInvalidSettingsForEpochStartTrigger, err) +} + +func TestNewEpochStartTrigger_InvalidSettingsShouldErr3(t *testing.T) { + t.Parallel() + + arguments := createMockEpochStartTriggerArguments() + arguments.Settings.RoundsPerEpoch = 4 + arguments.Settings.MinRoundsBetweenEpochs = 6 + + epochStartTrigger, err := NewEpochStartTrigger(arguments) + assert.Nil(t, epochStartTrigger) + assert.Equal(t, epochStart.ErrInvalidSettingsForEpochStartTrigger, err) +} + +func TestNewEpochStartTrigger_ShouldOk(t *testing.T) { + t.Parallel() + + arguments := createMockEpochStartTriggerArguments() + + epochStartTrigger, err := 
NewEpochStartTrigger(arguments) + assert.NotNil(t, epochStartTrigger) + assert.Nil(t, err) +} + +func TestTrigger_Update(t *testing.T) { + t.Parallel() + + notifierWasCalled := false + epoch := uint32(0) + round := uint64(0) + arguments := createMockEpochStartTriggerArguments() + arguments.Epoch = epoch + arguments.EpochStartNotifier = &mock.EpochStartNotifierStub{ + NotifyAllCalled: func(hdr data.HeaderHandler) { + notifierWasCalled = true + }, + } + epochStartTrigger, _ := NewEpochStartTrigger(arguments) + + epochStartTrigger.Update(round) + round++ + epochStartTrigger.Update(round) + round++ + epochStartTrigger.Update(round) + round++ + epochStartTrigger.Update(round) + + ret := epochStartTrigger.IsEpochStart() + assert.True(t, ret) + + epc := epochStartTrigger.Epoch() + assert.Equal(t, epoch+1, epc) + + epochStartTrigger.SetProcessed(&block.MetaBlock{ + Round: round, + EpochStart: block.EpochStart{LastFinalizedHeaders: []block.EpochStartShardData{{RootHash: []byte("root")}}}}) + ret = epochStartTrigger.IsEpochStart() + assert.False(t, ret) + assert.True(t, notifierWasCalled) +} + +func TestTrigger_ForceEpochStartIncorrectRoundShouldErr(t *testing.T) { + t.Parallel() + + round := uint64(1) + arguments := createMockEpochStartTriggerArguments() + epochStartTrigger, _ := NewEpochStartTrigger(arguments) + + epochStartTrigger.Update(round) + + err := epochStartTrigger.ForceEpochStart(0) + assert.Equal(t, epochStart.ErrSavedRoundIsHigherThanInputRound, err) +} + +func TestTrigger_ForceEpochStartRoundEqualWithSavedRoundShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockEpochStartTriggerArguments() + epochStartTrigger, _ := NewEpochStartTrigger(arguments) + + err := epochStartTrigger.ForceEpochStart(0) + assert.Equal(t, epochStart.ErrForceEpochStartCanBeCalledOnlyOnNewRound, err) +} + +func TestTrigger_ForceEpochStartNotEnoughRoundsShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockEpochStartTriggerArguments() + arguments.Settings.MinRoundsBetweenEpochs = 2 + epochStartTrigger, _ := NewEpochStartTrigger(arguments) + + err := epochStartTrigger.ForceEpochStart(1) + assert.Equal(t, epochStart.ErrNotEnoughRoundsBetweenEpochs, err) +} + +func TestTrigger_ForceEpochStartShouldOk(t *testing.T) { + t.Parallel() + + epoch := uint32(0) + arguments := createMockEpochStartTriggerArguments() + arguments.Epoch = epoch + epochStartTrigger, _ := NewEpochStartTrigger(arguments) + + err := epochStartTrigger.ForceEpochStart(1) + assert.Nil(t, err) + + newEpoch := epochStartTrigger.Epoch() + assert.Equal(t, epoch+1, newEpoch) + + isEpochStart := epochStartTrigger.IsEpochStart() + assert.True(t, isEpochStart) +} diff --git a/epochStart/mock/cacherStub.go b/epochStart/mock/cacherStub.go new file mode 100644 index 00000000000..f86d6be6bf1 --- /dev/null +++ b/epochStart/mock/cacherStub.go @@ -0,0 +1,69 @@ +package mock + +type CacherStub struct { + ClearCalled func() + PutCalled func(key []byte, value interface{}) (evicted bool) + GetCalled func(key []byte) (value interface{}, ok bool) + HasCalled func(key []byte) bool + PeekCalled func(key []byte) (value interface{}, ok bool) + HasOrAddCalled func(key []byte, value interface{}) (ok, evicted bool) + RemoveCalled func(key []byte) + RemoveOldestCalled func() + KeysCalled func() [][]byte + LenCalled func() int + MaxSizeCalled func() int + RegisterHandlerCalled func(func(key []byte)) +} + +func (cs *CacherStub) Clear() { + cs.ClearCalled() +} + +func (cs *CacherStub) Put(key []byte, value interface{}) (evicted bool) { + return 
cs.PutCalled(key, value) +} + +func (cs *CacherStub) Get(key []byte) (value interface{}, ok bool) { + return cs.GetCalled(key) +} + +func (cs *CacherStub) Has(key []byte) bool { + return cs.HasCalled(key) +} + +func (cs *CacherStub) Peek(key []byte) (value interface{}, ok bool) { + return cs.PeekCalled(key) +} + +func (cs *CacherStub) HasOrAdd(key []byte, value interface{}) (ok, evicted bool) { + return cs.HasOrAddCalled(key, value) +} + +func (cs *CacherStub) Remove(key []byte) { + cs.RemoveCalled(key) +} + +func (cs *CacherStub) RemoveOldest() { + cs.RemoveOldestCalled() +} + +func (cs *CacherStub) Keys() [][]byte { + return cs.KeysCalled() +} + +func (cs *CacherStub) Len() int { + return cs.LenCalled() +} + +func (cs *CacherStub) MaxSize() int { + return cs.MaxSizeCalled() +} + +func (cs *CacherStub) RegisterHandler(handler func(key []byte)) { + cs.RegisterHandlerCalled(handler) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (cs *CacherStub) IsInterfaceNil() bool { + return cs == nil +} diff --git a/epochStart/mock/chainStorerStub.go b/epochStart/mock/chainStorerStub.go new file mode 100644 index 00000000000..6b0edcc713e --- /dev/null +++ b/epochStart/mock/chainStorerStub.go @@ -0,0 +1,86 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/pkg/errors" +) + +// ChainStorerStub is a mock implementation of the ChainStorer interface +type ChainStorerStub struct { + AddStorerCalled func(key dataRetriever.UnitType, s storage.Storer) + GetStorerCalled func(unitType dataRetriever.UnitType) storage.Storer + HasCalled func(unitType dataRetriever.UnitType, key []byte) error + GetCalled func(unitType dataRetriever.UnitType, key []byte) ([]byte, error) + PutCalled func(unitType dataRetriever.UnitType, key []byte, value []byte) error + GetAllCalled func(unitType dataRetriever.UnitType, keys [][]byte) (map[string][]byte, error) + DestroyCalled func() error +} + +// AddStorer will add a new storer to the chain map +func (bc *ChainStorerStub) AddStorer(key dataRetriever.UnitType, s storage.Storer) { + if bc.AddStorerCalled != nil { + bc.AddStorerCalled(key, s) + } +} + +// GetStorer returns the storer from the chain map or nil if the storer was not found +func (bc *ChainStorerStub) GetStorer(unitType dataRetriever.UnitType) storage.Storer { + if bc.GetStorerCalled != nil { + return bc.GetStorerCalled(unitType) + } + return nil +} + +// Has returns nil if the key is found in the selected Unit or an error otherwise. +// It can return an error if the provided unit type is not supported or if the +// underlying implementation of the storage unit reports an error. +func (bc *ChainStorerStub) Has(unitType dataRetriever.UnitType, key []byte) error { + if bc.HasCalled != nil { + return bc.HasCalled(unitType, key) + } + return errors.New("Key not found") +} + +// Get returns the value for the given key if found in the selected storage unit, + nil otherwise.
It can return an error if the provided unit type is not supported +// or if the storage unit underlying implementation reports an error +func (bc *ChainStorerStub) Get(unitType dataRetriever.UnitType, key []byte) ([]byte, error) { + if bc.GetCalled != nil { + return bc.GetCalled(unitType, key) + } + return nil, nil +} + +// Put stores the key, value pair in the selected storage unit +// It can return an error if the provided unit type is not supported +// or if the storage unit underlying implementation reports an error +func (bc *ChainStorerStub) Put(unitType dataRetriever.UnitType, key []byte, value []byte) error { + if bc.PutCalled != nil { + return bc.PutCalled(unitType, key, value) + } + return nil +} + +// GetAll gets all the elements with keys in the keys array, from the selected storage unit +// It can report an error if the provided unit type is not supported, if there is a missing +// key in the unit, or if the underlying implementation of the storage unit reports an error. +func (bc *ChainStorerStub) GetAll(unitType dataRetriever.UnitType, keys [][]byte) (map[string][]byte, error) { + if bc.GetAllCalled != nil { + return bc.GetAllCalled(unitType, keys) + } + return nil, nil +} + +// Destroy removes the underlying files/resources used by the storage service +func (bc *ChainStorerStub) Destroy() error { + if bc.DestroyCalled != nil { + return bc.DestroyCalled() + } + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (bc *ChainStorerStub) IsInterfaceNil() bool { + return bc == nil +} diff --git a/epochStart/mock/epochStartNotifierStub.go b/epochStart/mock/epochStartNotifierStub.go new file mode 100644 index 00000000000..c817a027e60 --- /dev/null +++ b/epochStart/mock/epochStartNotifierStub.go @@ -0,0 +1,15 @@ +package mock + +import "github.com/ElrondNetwork/elrond-go/data" + +type EpochStartNotifierStub struct { + NotifyAllCalled func(hdr data.HeaderHandler) +} + +func (esnm *EpochStartNotifierStub) NotifyAll(hdr data.HeaderHandler) { + esnm.NotifyAllCalled(hdr) +} + +func (esnm *EpochStartNotifierStub) IsInterfaceNil() bool { + return esnm == nil +} diff --git a/epochStart/mock/hasherMock.go b/epochStart/mock/hasherMock.go new file mode 100644 index 00000000000..bd7ed68ca2a --- /dev/null +++ b/epochStart/mock/hasherMock.go @@ -0,0 +1,34 @@ +package mock + +import "crypto/sha256" + +var sha256EmptyHash []byte + +// HasherMock that will be used for testing +type HasherMock struct { +} + +// Compute will output the SHA's equivalent of the input string +func (sha HasherMock) Compute(s string) []byte { + h := sha256.New() + _, _ = h.Write([]byte(s)) + return h.Sum(nil) +} + +// EmptyHash will return the equivalent of empty string SHA's +func (sha HasherMock) EmptyHash() []byte { + if len(sha256EmptyHash) == 0 { + sha256EmptyHash = sha.Compute("") + } + return sha256EmptyHash +} + +// Size return the required size in bytes +func (HasherMock) Size() int { + return sha256.Size +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sha HasherMock) IsInterfaceNil() bool { + return false +} diff --git a/epochStart/mock/headerValidatorStub.go b/epochStart/mock/headerValidatorStub.go new file mode 100644 index 00000000000..c75b7b47c88 --- /dev/null +++ b/epochStart/mock/headerValidatorStub.go @@ -0,0 +1,19 @@ +package mock + +import "github.com/ElrondNetwork/elrond-go/data" + +type HeaderValidatorStub struct { + IsHeaderConstructionValidCalled func(currHdr, prevHdr data.HeaderHandler) error +} + +func (hvs 
*HeaderValidatorStub) IsHeaderConstructionValid(currHdr, prevHdr data.HeaderHandler) error { + if hvs.IsHeaderConstructionValidCalled != nil { + return hvs.IsHeaderConstructionValidCalled(currHdr, prevHdr) + } + return nil +} + +// IsInterfaceNil returns if underlying object is true +func (hvs *HeaderValidatorStub) IsInterfaceNil() bool { + return hvs == nil +} diff --git a/epochStart/mock/headersCacherStub.go b/epochStart/mock/headersCacherStub.go new file mode 100644 index 00000000000..aec5bcf8c0a --- /dev/null +++ b/epochStart/mock/headersCacherStub.go @@ -0,0 +1,83 @@ +package mock + +import ( + "errors" + + "github.com/ElrondNetwork/elrond-go/data" +) + +type HeadersCacherStub struct { + AddCalled func(headerHash []byte, header data.HeaderHandler) + RemoveHeaderByHashCalled func(headerHash []byte) + RemoveHeaderByNonceAndShardIdCalled func(hdrNonce uint64, shardId uint32) + GetHeaderByNonceAndShardIdCalled func(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) + GetHeaderByHashCalled func(hash []byte) (data.HeaderHandler, error) + ClearCalled func() + RegisterHandlerCalled func(handler func(header data.HeaderHandler, shardHeaderHash []byte)) + NoncesCalled func(shardId uint32) []uint64 + LenCalled func() int + MaxSizeCalled func() int +} + +func (hcs *HeadersCacherStub) AddHeader(headerHash []byte, header data.HeaderHandler) { + if hcs.AddCalled != nil { + hcs.AddCalled(headerHash, header) + } +} + +func (hcs *HeadersCacherStub) RemoveHeaderByHash(headerHash []byte) { + if hcs.RemoveHeaderByHashCalled != nil { + hcs.RemoveHeaderByHashCalled(headerHash) + } +} + +func (hcs *HeadersCacherStub) RemoveHeaderByNonceAndShardId(hdrNonce uint64, shardId uint32) { + if hcs.RemoveHeaderByNonceAndShardIdCalled != nil { + hcs.RemoveHeaderByNonceAndShardIdCalled(hdrNonce, shardId) + } +} + +func (hcs *HeadersCacherStub) GetHeadersByNonceAndShardId(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) { + if hcs.GetHeaderByNonceAndShardIdCalled != nil { + return hcs.GetHeaderByNonceAndShardIdCalled(hdrNonce, shardId) + } + return nil, nil, errors.New("err") +} + +func (hcs *HeadersCacherStub) GetHeaderByHash(hash []byte) (data.HeaderHandler, error) { + if hcs.GetHeaderByHashCalled != nil { + return hcs.GetHeaderByHashCalled(hash) + } + return nil, nil +} + +func (hcs *HeadersCacherStub) Clear() { + if hcs.ClearCalled != nil { + hcs.ClearCalled() + } +} + +func (hcs *HeadersCacherStub) RegisterHandler(handler func(header data.HeaderHandler, shardHeaderHash []byte)) { + if hcs.RegisterHandlerCalled != nil { + hcs.RegisterHandlerCalled(handler) + } +} + +func (hcs *HeadersCacherStub) Nonces(shardId uint32) []uint64 { + if hcs.NoncesCalled != nil { + return hcs.NoncesCalled(shardId) + } + return nil +} + +func (hcs *HeadersCacherStub) Len() int { + return 0 +} + +func (hcs *HeadersCacherStub) MaxSize() int { + return 100 +} + +func (hcs *HeadersCacherStub) IsInterfaceNil() bool { + return hcs == nil +} diff --git a/epochStart/mock/marshalizerMock.go b/epochStart/mock/marshalizerMock.go new file mode 100644 index 00000000000..5299a5bb257 --- /dev/null +++ b/epochStart/mock/marshalizerMock.go @@ -0,0 +1,52 @@ +package mock + +import ( + "encoding/json" + "errors" +) + +var errMockMarshalizer = errors.New("MarshalizerMock generic error") + +// MarshalizerMock that will be used for testing +type MarshalizerMock struct { + Fail bool +} + +// Marshal converts the input object in a slice of bytes +func (mm *MarshalizerMock) Marshal(obj interface{}) ([]byte, error) { + 
if mm.Fail { + return nil, errMockMarshalizer + } + + if obj == nil { + return nil, errors.New("nil object to serialize from") + } + + return json.Marshal(obj) +} + +// Unmarshal applies the serialized values over an instantiated object +func (mm *MarshalizerMock) Unmarshal(obj interface{}, buff []byte) error { + if mm.Fail { + return errMockMarshalizer + } + + if obj == nil { + return errors.New("nil object to serialize to") + } + + if buff == nil { + return errors.New("nil byte buffer to deserialize from") + } + + if len(buff) == 0 { + return errors.New("empty byte buffer to deserialize from") + } + + return json.Unmarshal(buff, obj) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (mm *MarshalizerMock) IsInterfaceNil() bool { + return mm == nil +} diff --git a/epochStart/mock/poolsHolderStub.go b/epochStart/mock/poolsHolderStub.go new file mode 100644 index 00000000000..4542a57807a --- /dev/null +++ b/epochStart/mock/poolsHolderStub.go @@ -0,0 +1,54 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/storage" +) + +type PoolsHolderStub struct { + HeadersCalled func() dataRetriever.HeadersPool + PeerChangesBlocksCalled func() storage.Cacher + TransactionsCalled func() dataRetriever.ShardedDataCacherNotifier + UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier + RewardTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier + MiniBlocksCalled func() storage.Cacher + TrieNodesCalled func() storage.Cacher + CurrBlockTxsCalled func() dataRetriever.TransactionCacher +} + +func (phs *PoolsHolderStub) CurrentBlockTxs() dataRetriever.TransactionCacher { + return phs.CurrBlockTxsCalled() +} + +func (phs *PoolsHolderStub) Headers() dataRetriever.HeadersPool { + return phs.HeadersCalled() +} + +func (phs *PoolsHolderStub) PeerChangesBlocks() storage.Cacher { + return phs.PeerChangesBlocksCalled() +} + +func (phs *PoolsHolderStub) Transactions() dataRetriever.ShardedDataCacherNotifier { + return phs.TransactionsCalled() +} + +func (phs *PoolsHolderStub) MiniBlocks() storage.Cacher { + return phs.MiniBlocksCalled() +} + +func (phs *PoolsHolderStub) UnsignedTransactions() dataRetriever.ShardedDataCacherNotifier { + return phs.UnsignedTransactionsCalled() +} + +func (phs *PoolsHolderStub) RewardTransactions() dataRetriever.ShardedDataCacherNotifier { + return phs.RewardTransactionsCalled() +} + +func (phs *PoolsHolderStub) TrieNodes() storage.Cacher { + return phs.TrieNodesCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (phs *PoolsHolderStub) IsInterfaceNil() bool { + return phs == nil +} diff --git a/epochStart/mock/requestHandlerStub.go b/epochStart/mock/requestHandlerStub.go new file mode 100644 index 00000000000..ca0fb3bdcb6 --- /dev/null +++ b/epochStart/mock/requestHandlerStub.go @@ -0,0 +1,73 @@ +package mock + +type RequestHandlerStub struct { + RequestShardHeaderCalled func(shardId uint32, hash []byte) + RequestMetaHeaderCalled func(hash []byte) + RequestMetaHeaderByNonceCalled func(nonce uint64) + RequestShardHeaderByNonceCalled func(shardId uint32, nonce uint64) + RequestTransactionHandlerCalled func(destShardID uint32, txHashes [][]byte) + RequestScrHandlerCalled func(destShardID uint32, txHashes [][]byte) + RequestRewardTxHandlerCalled func(destShardID uint32, txHashes [][]byte) + RequestMiniBlockHandlerCalled func(destShardID uint32, miniblockHash []byte) +} + +func (rhs *RequestHandlerStub) 
RequestShardHeader(shardId uint32, hash []byte) { + if rhs.RequestShardHeaderCalled == nil { + return + } + rhs.RequestShardHeaderCalled(shardId, hash) +} + +func (rhs *RequestHandlerStub) RequestMetaHeader(hash []byte) { + if rhs.RequestMetaHeaderCalled == nil { + return + } + rhs.RequestMetaHeaderCalled(hash) +} + +func (rhs *RequestHandlerStub) RequestMetaHeaderByNonce(nonce uint64) { + if rhs.RequestMetaHeaderByNonceCalled == nil { + return + } + rhs.RequestMetaHeaderByNonceCalled(nonce) +} + +func (rhs *RequestHandlerStub) RequestShardHeaderByNonce(shardId uint32, nonce uint64) { + if rhs.RequestShardHeaderByNonceCalled == nil { + return + } + rhs.RequestShardHeaderByNonceCalled(shardId, nonce) +} + +func (rhs *RequestHandlerStub) RequestTransaction(destShardID uint32, txHashes [][]byte) { + if rhs.RequestTransactionHandlerCalled == nil { + return + } + rhs.RequestTransactionHandlerCalled(destShardID, txHashes) +} + +func (rhs *RequestHandlerStub) RequestUnsignedTransactions(destShardID uint32, txHashes [][]byte) { + if rhs.RequestScrHandlerCalled == nil { + return + } + rhs.RequestScrHandlerCalled(destShardID, txHashes) +} + +func (rhs *RequestHandlerStub) RequestRewardTransactions(destShardID uint32, txHashes [][]byte) { + if rhs.RequestRewardTxHandlerCalled == nil { + return + } + rhs.RequestRewardTxHandlerCalled(destShardID, txHashes) +} + +func (rhs *RequestHandlerStub) RequestMiniBlock(shardId uint32, miniblockHash []byte) { + if rhs.RequestMiniBlockHandlerCalled == nil { + return + } + rhs.RequestMiniBlockHandlerCalled(shardId, miniblockHash) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (rhs *RequestHandlerStub) IsInterfaceNil() bool { + return rhs == nil +} diff --git a/epochStart/mock/rounderStub.go b/epochStart/mock/rounderStub.go new file mode 100644 index 00000000000..73252b7d5cf --- /dev/null +++ b/epochStart/mock/rounderStub.go @@ -0,0 +1,61 @@ +package mock + +import ( + "time" +) + +type RounderStub struct { + RoundIndex int64 + + IndexCalled func() int64 + TimeDurationCalled func() time.Duration + TimeStampCalled func() time.Time + UpdateRoundCalled func(time.Time, time.Time) + RemainingTimeCalled func(startTime time.Time, maxTime time.Duration) time.Duration +} + +func (rndm *RounderStub) Index() int64 { + if rndm.IndexCalled != nil { + return rndm.IndexCalled() + } + + return rndm.RoundIndex +} + +func (rndm *RounderStub) TimeDuration() time.Duration { + if rndm.TimeDurationCalled != nil { + return rndm.TimeDurationCalled() + } + + return time.Duration(4000 * time.Millisecond) +} + +func (rndm *RounderStub) TimeStamp() time.Time { + if rndm.TimeStampCalled != nil { + return rndm.TimeStampCalled() + } + + return time.Unix(0, 0) +} + +func (rndm *RounderStub) UpdateRound(genesisRoundTimeStamp time.Time, timeStamp time.Time) { + if rndm.UpdateRoundCalled != nil { + rndm.UpdateRoundCalled(genesisRoundTimeStamp, timeStamp) + return + } + + rndm.RoundIndex++ +} + +func (rndm *RounderStub) RemainingTime(startTime time.Time, maxTime time.Duration) time.Duration { + if rndm.RemainingTimeCalled != nil { + return rndm.RemainingTimeCalled(startTime, maxTime) + } + + return time.Duration(4000 * time.Millisecond) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (rndm *RounderStub) IsInterfaceNil() bool { + return rndm == nil +} diff --git a/epochStart/mock/shardIdHashMapStub.go b/epochStart/mock/shardIdHashMapStub.go new file mode 100644 index 00000000000..f87fc07de94 --- /dev/null +++ 
b/epochStart/mock/shardIdHashMapStub.go @@ -0,0 +1,29 @@ +package mock + +type ShardIdHasMapStub struct { + LoadCalled func(shardId uint32) ([]byte, bool) + StoreCalled func(shardId uint32, hash []byte) + RangeCalled func(f func(shardId uint32, hash []byte) bool) + DeleteCalled func(shardId uint32) +} + +func (sihsm *ShardIdHasMapStub) Load(shardId uint32) ([]byte, bool) { + return sihsm.LoadCalled(shardId) +} + +func (sihsm *ShardIdHasMapStub) Store(shardId uint32, hash []byte) { + sihsm.StoreCalled(shardId, hash) +} + +func (sihsm *ShardIdHasMapStub) Range(f func(shardId uint32, hash []byte) bool) { + sihsm.RangeCalled(f) +} + +func (sihsm *ShardIdHasMapStub) Delete(shardId uint32) { + sihsm.DeleteCalled(shardId) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sihsm *ShardIdHasMapStub) IsInterfaceNil() bool { + return sihsm == nil +} diff --git a/epochStart/mock/storerStub.go b/epochStart/mock/storerStub.go new file mode 100644 index 00000000000..96425489da6 --- /dev/null +++ b/epochStart/mock/storerStub.go @@ -0,0 +1,39 @@ +package mock + +type StorerStub struct { + PutCalled func(key, data []byte) error + GetCalled func(key []byte) ([]byte, error) + HasCalled func(key []byte) error + RemoveCalled func(key []byte) error + ClearCacheCalled func() + DestroyUnitCalled func() error +} + +func (ss *StorerStub) Put(key, data []byte) error { + return ss.PutCalled(key, data) +} + +func (ss *StorerStub) Get(key []byte) ([]byte, error) { + return ss.GetCalled(key) +} + +func (ss *StorerStub) Has(key []byte) error { + return ss.HasCalled(key) +} + +func (ss *StorerStub) Remove(key []byte) error { + return ss.RemoveCalled(key) +} + +func (ss *StorerStub) ClearCache() { + ss.ClearCacheCalled() +} + +func (ss *StorerStub) DestroyUnit() error { + return ss.DestroyUnitCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ss *StorerStub) IsInterfaceNil() bool { + return ss == nil +} diff --git a/epochStart/mock/syncTimerStub.go b/epochStart/mock/syncTimerStub.go new file mode 100644 index 00000000000..f86ddbdb6a7 --- /dev/null +++ b/epochStart/mock/syncTimerStub.go @@ -0,0 +1,38 @@ +package mock + +import ( + "time" +) + +// SyncTimerStub is a mock implementation of SyncTimer interface +type SyncTimerStub struct { + StartSyncCalled func() + ClockOffsetCalled func() time.Duration + FormattedCurrentTimeCalled func() string + CurrentTimeCalled func() time.Time +} + +// StartSync is a mock implementation for StartSync +func (s *SyncTimerStub) StartSync() { + s.StartSyncCalled() +} + +// ClockOffset is a mock implementation for ClockOffset +func (s *SyncTimerStub) ClockOffset() time.Duration { + return s.ClockOffsetCalled() +} + +// FormattedCurrentTime is a mock implementation for FormattedCurrentTime +func (s *SyncTimerStub) FormattedCurrentTime() string { + return s.FormattedCurrentTimeCalled() +} + +// CurrentTime is a mock implementation for CurrentTime +func (s *SyncTimerStub) CurrentTime() time.Time { + return s.CurrentTimeCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (stm *SyncTimerStub) IsInterfaceNil() bool { + return stm == nil +} diff --git a/epochStart/mock/uint64ByteSliceConverterMock.go b/epochStart/mock/uint64ByteSliceConverterMock.go new file mode 100644 index 00000000000..68c1ec89397 --- /dev/null +++ b/epochStart/mock/uint64ByteSliceConverterMock.go @@ -0,0 +1,28 @@ +package mock + +// Uint64ByteSliceConverterMock converts byte slice to/from uint64 +type 
Uint64ByteSliceConverterMock struct { + ToByteSliceCalled func(uint64) []byte + ToUint64Called func([]byte) (uint64, error) +} + +// ToByteSlice is a mock implementation for Uint64ByteSliceConverter +func (u *Uint64ByteSliceConverterMock) ToByteSlice(p uint64) []byte { + if u.ToByteSliceCalled == nil { + return []byte("") + } + return u.ToByteSliceCalled(p) +} + +// ToUint64 is a mock implementation for Uint64ByteSliceConverter +func (u *Uint64ByteSliceConverterMock) ToUint64(p []byte) (uint64, error) { + if u.ToUint64Called == nil { + return 0, nil + } + return u.ToUint64Called(p) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (u *Uint64ByteSliceConverterMock) IsInterfaceNil() bool { + return u == nil +} diff --git a/epochStart/mock/uint64SyncMapCacherStub.go b/epochStart/mock/uint64SyncMapCacherStub.go new file mode 100644 index 00000000000..7c58567fe34 --- /dev/null +++ b/epochStart/mock/uint64SyncMapCacherStub.go @@ -0,0 +1,43 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/dataRetriever" +) + +type Uint64SyncMapCacherStub struct { + ClearCalled func() + GetCalled func(nonce uint64) (dataRetriever.ShardIdHashMap, bool) + MergeCalled func(nonce uint64, src dataRetriever.ShardIdHashMap) + RemoveCalled func(nonce uint64, shardId uint32) + RegisterHandlerCalled func(handler func(nonce uint64, shardId uint32, value []byte)) + HasCalled func(nonce uint64, shardId uint32) bool +} + +func (usmcs *Uint64SyncMapCacherStub) Clear() { + usmcs.ClearCalled() +} + +func (usmcs *Uint64SyncMapCacherStub) Get(nonce uint64) (dataRetriever.ShardIdHashMap, bool) { + return usmcs.GetCalled(nonce) +} + +func (usmcs *Uint64SyncMapCacherStub) Merge(nonce uint64, src dataRetriever.ShardIdHashMap) { + usmcs.MergeCalled(nonce, src) +} + +func (usmcs *Uint64SyncMapCacherStub) RegisterHandler(handler func(nonce uint64, shardId uint32, value []byte)) { + usmcs.RegisterHandlerCalled(handler) +} + +func (usmcs *Uint64SyncMapCacherStub) Has(nonce uint64, shardId uint32) bool { + return usmcs.HasCalled(nonce, shardId) +} + +func (usmcs *Uint64SyncMapCacherStub) Remove(nonce uint64, shardId uint32) { + usmcs.RemoveCalled(nonce, shardId) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (usmcs *Uint64SyncMapCacherStub) IsInterfaceNil() bool { + return usmcs == nil +} diff --git a/epochStart/notifier/common.go b/epochStart/notifier/common.go new file mode 100644 index 00000000000..a17c84f585e --- /dev/null +++ b/epochStart/notifier/common.go @@ -0,0 +1,26 @@ +package notifier + +import "github.com/ElrondNetwork/elrond-go/data" + +// SubscribeFunctionHandler defines what a struct which contain a handler function for epoch start should do +type SubscribeFunctionHandler interface { + EpochStartAction(hdr data.HeaderHandler) +} + +// MakeHandlerForEpochStart will return a struct which will satisfy the above interface +func MakeHandlerForEpochStart(funcForSubscription func(hdr data.HeaderHandler)) SubscribeFunctionHandler { + handler := handlerStruct{subscribedFunc: funcForSubscription} + return &handler +} + +// handlerStruct represents a struct which satisfies the SubscribeFunctionHandler interface +type handlerStruct struct { + subscribedFunc func(hdr data.HeaderHandler) +} + +// EpochStartAction will notify the subscribed function if not nil +func (hs *handlerStruct) EpochStartAction(hdr data.HeaderHandler) { + if hs.subscribedFunc != nil { + hs.subscribedFunc(hdr) + } +} diff --git 
a/epochStart/notifier/epochStartSubscriptionHandler.go b/epochStart/notifier/epochStartSubscriptionHandler.go new file mode 100644 index 00000000000..56002d62a8a --- /dev/null +++ b/epochStart/notifier/epochStartSubscriptionHandler.go @@ -0,0 +1,57 @@ +package notifier + +import ( + "sync" + + "github.com/ElrondNetwork/elrond-go/data" +) + +// epochStartSubscriptionHandler will handle the subscription of functions and notify them +type epochStartSubscriptionHandler struct { + epochStartHandlers []SubscribeFunctionHandler + mutEpochStartHandler sync.RWMutex +} + +// NewEpochStartSubscriptionHandler returns a new instance of epochStartSubscriptionHandler +func NewEpochStartSubscriptionHandler() *epochStartSubscriptionHandler { + return &epochStartSubscriptionHandler{ + epochStartHandlers: make([]SubscribeFunctionHandler, 0), + mutEpochStartHandler: sync.RWMutex{}, + } +} + +// RegisterHandler will subscribe a function so it will be called when NotifyAll method is called +func (essh *epochStartSubscriptionHandler) RegisterHandler(handler SubscribeFunctionHandler) { + if handler != nil { + essh.mutEpochStartHandler.Lock() + essh.epochStartHandlers = append(essh.epochStartHandlers, handler) + essh.mutEpochStartHandler.Unlock() + } +} + +// UnregisterHandler will unsubscribe a function from the slice +func (essh *epochStartSubscriptionHandler) UnregisterHandler(handlerToUnregister SubscribeFunctionHandler) { + if handlerToUnregister != nil { + essh.mutEpochStartHandler.Lock() + for idx, handler := range essh.epochStartHandlers { + if handler == handlerToUnregister { + essh.epochStartHandlers = append(essh.epochStartHandlers[:idx], essh.epochStartHandlers[idx+1:]...) + } + } + essh.mutEpochStartHandler.Unlock() + } +} + +// NotifyAll will call all the subscribed functions from the internal slice +func (essh *epochStartSubscriptionHandler) NotifyAll(hdr data.HeaderHandler) { + essh.mutEpochStartHandler.Lock() + for _, handler := range essh.epochStartHandlers { + handler.EpochStartAction(hdr) + } + essh.mutEpochStartHandler.Unlock() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (essh *epochStartSubscriptionHandler) IsInterfaceNil() bool { + return essh == nil +} diff --git a/epochStart/notifier/epochStartSubscriptionHandler_test.go b/epochStart/notifier/epochStartSubscriptionHandler_test.go new file mode 100644 index 00000000000..8be8f9caefa --- /dev/null +++ b/epochStart/notifier/epochStartSubscriptionHandler_test.go @@ -0,0 +1,108 @@ +package notifier_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/epochStart/notifier" + "github.com/stretchr/testify/assert" +) + +func TestNewEpochStartSubscriptionHandler(t *testing.T) { + t.Parallel() + + essh := notifier.NewEpochStartSubscriptionHandler() + assert.NotNil(t, essh) + assert.False(t, essh.IsInterfaceNil()) +} + +func TestEpochStartSubscriptionHandler_RegisterHandlerNilHandlerShouldNotAdd(t *testing.T) { + t.Parallel() + + essh := notifier.NewEpochStartSubscriptionHandler() + essh.RegisterHandler(nil) + + handlers, mutHandlers := essh.RegisteredHandlers() + mutHandlers.RLock() + assert.Equal(t, 0, len(handlers)) + mutHandlers.RUnlock() +} + +func TestEpochStartSubscriptionHandler_RegisterHandlerOkHandlerShouldAdd(t *testing.T) { + t.Parallel() + + essh := notifier.NewEpochStartSubscriptionHandler() + handler := notifier.MakeHandlerForEpochStart(func(hdr data.HeaderHandler) {}) + + essh.RegisterHandler(handler) + + handlers, mutHandlers := 
essh.RegisteredHandlers() + mutHandlers.RLock() + assert.Equal(t, 1, len(handlers)) + mutHandlers.RUnlock() +} + +func TestEpochStartSubscriptionHandler_UnregisterHandlerNilHandlerShouldDoNothing(t *testing.T) { + t.Parallel() + + essh := notifier.NewEpochStartSubscriptionHandler() + + // first register a handler + handler := notifier.MakeHandlerForEpochStart(func(hdr data.HeaderHandler) {}) + essh.RegisterHandler(handler) + + // then try to unregister but a nil handler is given + essh.UnregisterHandler(nil) + handlers, mutHandlers := essh.RegisteredHandlers() + mutHandlers.RLock() + // length of the slice should still be 1 + assert.Equal(t, 1, len(handlers)) + mutHandlers.RUnlock() +} + +func TestEpochStartSubscriptionHandler_UnregisterHandlerOklHandlerShouldRemove(t *testing.T) { + t.Parallel() + + essh := notifier.NewEpochStartSubscriptionHandler() + + // first register a handler + handler := notifier.MakeHandlerForEpochStart(func(hdr data.HeaderHandler) {}) + essh.RegisterHandler(handler) + + // then unregister the same handler + essh.UnregisterHandler(handler) + handlers, mutHandlers := essh.RegisteredHandlers() + mutHandlers.RLock() + // length of the slice should be 0 because the handler was unregistered + assert.Equal(t, 0, len(handlers)) + mutHandlers.RUnlock() +} + +func TestEpochStartSubscriptionHandler_NotifyAll(t *testing.T) { + t.Parallel() + + firstHandlerWasCalled := false + secondHandlerWasCalled := false + essh := notifier.NewEpochStartSubscriptionHandler() + + // register 2 handlers + handler1 := notifier.MakeHandlerForEpochStart(func(hdr data.HeaderHandler) { + firstHandlerWasCalled = true + }) + handler2 := notifier.MakeHandlerForEpochStart(func(hdr data.HeaderHandler) { + secondHandlerWasCalled = true + }) + + essh.RegisterHandler(handler1) + essh.RegisterHandler(handler2) + + // make sure that the handler were not called yet + assert.False(t, firstHandlerWasCalled) + assert.False(t, secondHandlerWasCalled) + + // now we call the NotifyAll method and all handlers should be called + essh.NotifyAll(&block.Header{}) + assert.True(t, firstHandlerWasCalled) + assert.True(t, secondHandlerWasCalled) +} diff --git a/epochStart/notifier/export_test.go b/epochStart/notifier/export_test.go new file mode 100644 index 00000000000..35188a55b28 --- /dev/null +++ b/epochStart/notifier/export_test.go @@ -0,0 +1,9 @@ +package notifier + +import ( + "sync" +) + +func (essh *epochStartSubscriptionHandler) RegisteredHandlers() ([]SubscribeFunctionHandler, *sync.RWMutex) { + return essh.epochStartHandlers, &essh.mutEpochStartHandler +} diff --git a/epochStart/shardchain/testTrigger.go b/epochStart/shardchain/testTrigger.go new file mode 100644 index 00000000000..6b547a12ac3 --- /dev/null +++ b/epochStart/shardchain/testTrigger.go @@ -0,0 +1,30 @@ +package shardchain + +import "github.com/ElrondNetwork/elrond-go/epochStart" + +// TestTrigger extends start of epoch trigger and is used in integration tests as it exposes some functions +// that are not supposed to be used in production code +// Exported functions simplify the reproduction of edge cases +type TestTrigger struct { + *trigger +} + +// SetTrigger sets the start of epoch trigger +func (t *TestTrigger) SetTrigger(triggerHandler epochStart.TriggerHandler) { + actualTrigger, ok := triggerHandler.(*trigger) + if !ok { + return + } + + t.trigger = actualTrigger +} + +// SetRoundsPerEpoch sets the number of round between epochs +func (t *TestTrigger) SetRoundsPerEpoch(roundsPerEpoch uint64) { + //does nothing as trigger in shards is not 
done by chronology +} + +// GetRoundsPerEpoch gets the number of rounds per epoch +func (t *TestTrigger) GetRoundsPerEpoch() uint64 { + return 0 +} diff --git a/epochStart/shardchain/trigger.go b/epochStart/shardchain/trigger.go new file mode 100644 index 00000000000..eb09fdc12c0 --- /dev/null +++ b/epochStart/shardchain/trigger.go @@ -0,0 +1,509 @@ +package shardchain + +import ( + "bytes" + "sync" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/typeConverters" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/logger" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" +) + +var log = logger.GetOrCreate("epochStart/shardchain") + +// ArgsShardEpochStartTrigger defines the arguments needed for new start of epoch trigger +type ArgsShardEpochStartTrigger struct { + Marshalizer marshal.Marshalizer + Hasher hashing.Hasher + + HeaderValidator epochStart.HeaderValidator + Uint64Converter typeConverters.Uint64ByteSliceConverter + + DataPool dataRetriever.PoolsHolder + Storage dataRetriever.StorageService + RequestHandler epochStart.RequestHandler + EpochStartNotifier epochStart.StartOfEpochNotifier + + Epoch uint32 + Validity uint64 + Finality uint64 +} + +type trigger struct { + epoch uint32 + currentRoundIndex int64 + epochStartRound uint64 + epochMetaBlockHash []byte + isEpochStart bool + finality uint64 + validity uint64 + epochFinalityAttestingRound uint64 + + newEpochHdrReceived bool + + mutTrigger sync.RWMutex + mapHashHdr map[string]*block.MetaBlock + mapNonceHashes map[uint64][]string + mapEpochStartHdrs map[string]*block.MetaBlock + + headersPool dataRetriever.HeadersPool + metaHdrStorage storage.Storer + metaNonceHdrStorage storage.Storer + uint64Converter typeConverters.Uint64ByteSliceConverter + + marshalizer marshal.Marshalizer + hasher hashing.Hasher + headerValidator epochStart.HeaderValidator + + requestHandler epochStart.RequestHandler + epochStartNotifier epochStart.StartOfEpochNotifier +} + +// NewEpochStartTrigger creates a trigger to signal start of epoch +func NewEpochStartTrigger(args *ArgsShardEpochStartTrigger) (*trigger, error) { + if args == nil { + return nil, epochStart.ErrNilArgsNewShardEpochStartTrigger + } + if check.IfNil(args.Hasher) { + return nil, epochStart.ErrNilHasher + } + if check.IfNil(args.Marshalizer) { + return nil, epochStart.ErrNilMarshalizer + } + if check.IfNil(args.HeaderValidator) { + return nil, epochStart.ErrNilHeaderValidator + } + if check.IfNil(args.DataPool) { + return nil, epochStart.ErrNilDataPoolsHolder + } + if check.IfNil(args.Storage) { + return nil, epochStart.ErrNilStorageService + } + if check.IfNil(args.RequestHandler) { + return nil, epochStart.ErrNilRequestHandler + } + if check.IfNil(args.DataPool.Headers()) { + return nil, epochStart.ErrNilMetaBlocksPool + } + if check.IfNil(args.Uint64Converter) { + return nil, epochStart.ErrNilUint64Converter + } + if check.IfNil(args.EpochStartNotifier) { + return nil, epochStart.ErrNilEpochStartNotifier + } + + metaHdrStorage := args.Storage.GetStorer(dataRetriever.MetaBlockUnit) + if check.IfNil(metaHdrStorage) { + return nil, epochStart.ErrNilMetaHdrStorage + } + + 
metaHdrNoncesStorage := args.Storage.GetStorer(dataRetriever.MetaHdrNonceHashDataUnit) + if check.IfNil(metaHdrNoncesStorage) { + return nil, epochStart.ErrNilMetaNonceHashStorage + } + + newTrigger := &trigger{ + epoch: args.Epoch, + currentRoundIndex: 0, + epochStartRound: 0, + epochFinalityAttestingRound: 0, + isEpochStart: false, + validity: args.Validity, + finality: args.Finality, + newEpochHdrReceived: false, + mutTrigger: sync.RWMutex{}, + mapHashHdr: make(map[string]*block.MetaBlock), + mapNonceHashes: make(map[uint64][]string), + mapEpochStartHdrs: make(map[string]*block.MetaBlock), + headersPool: args.DataPool.Headers(), + metaHdrStorage: metaHdrStorage, + metaNonceHdrStorage: metaHdrNoncesStorage, + uint64Converter: args.Uint64Converter, + marshalizer: args.Marshalizer, + hasher: args.Hasher, + headerValidator: args.HeaderValidator, + requestHandler: args.RequestHandler, + epochMetaBlockHash: nil, + epochStartNotifier: args.EpochStartNotifier, + } + return newTrigger, nil +} + +// IsEpochStart returns true if conditions are fulfilled for start of epoch +func (t *trigger) IsEpochStart() bool { + t.mutTrigger.RLock() + defer t.mutTrigger.RUnlock() + + return t.isEpochStart +} + +// Epoch returns the current epoch number +func (t *trigger) Epoch() uint32 { + t.mutTrigger.RLock() + defer t.mutTrigger.RUnlock() + + return t.epoch +} + +// EpochStartRound returns the start round of the current epoch +func (t *trigger) EpochStartRound() uint64 { + t.mutTrigger.RLock() + defer t.mutTrigger.RUnlock() + + return t.epochStartRound +} + +// EpochFinalityAttestingRound returns the round when epoch start block was finalized +func (t *trigger) EpochFinalityAttestingRound() uint64 { + t.mutTrigger.Lock() + defer t.mutTrigger.Unlock() + + return t.epochFinalityAttestingRound +} + +// ForceEpochStart sets the conditions for start of epoch to true in case of edge cases +func (t *trigger) ForceEpochStart(_ uint64) error { + t.mutTrigger.Lock() + defer t.mutTrigger.Unlock() + + return nil +} + +// ReceivedHeader saves the header into pool to verify if end-of-epoch conditions are fulfilled +func (t *trigger) ReceivedHeader(header data.HeaderHandler) { + t.mutTrigger.Lock() + defer t.mutTrigger.Unlock() + + if t.isEpochStart && header.GetEpoch() == t.epoch { + return + } + + metaHdr, ok := header.(*block.MetaBlock) + if !ok { + return + } + + if !t.newEpochHdrReceived && !metaHdr.IsStartOfEpochBlock() { + return + } + + isMetaStartOfEpochForCurrentEpoch := metaHdr.Epoch == t.epoch && metaHdr.IsStartOfEpochBlock() + if isMetaStartOfEpochForCurrentEpoch { + return + } + + hdrHash, err := core.CalculateHash(t.marshalizer, t.hasher, metaHdr) + if err != nil { + return + } + + if _, ok = t.mapHashHdr[string(hdrHash)]; ok { + return + } + if _, ok = t.mapEpochStartHdrs[string(hdrHash)]; ok { + return + } + + t.updateTriggerFromMeta(metaHdr, hdrHash) +} + +// call only if mutex is locked before +func (t *trigger) updateTriggerFromMeta(metaHdr *block.MetaBlock, hdrHash []byte) { + if metaHdr.IsStartOfEpochBlock() { + t.newEpochHdrReceived = true + t.mapEpochStartHdrs[string(hdrHash)] = metaHdr + } else { + t.mapHashHdr[string(hdrHash)] = metaHdr + t.mapNonceHashes[metaHdr.Nonce] = append(t.mapNonceHashes[metaHdr.Nonce], string(hdrHash)) + } + + for hash, meta := range t.mapEpochStartHdrs { + canActivateEpochStart, finalityAttestingRound := t.checkIfTriggerCanBeActivated(hash, meta) + if canActivateEpochStart && t.epoch < meta.Epoch { + t.epoch = meta.Epoch + t.isEpochStart = true + t.epochStartRound = 
meta.Round + t.epochFinalityAttestingRound = finalityAttestingRound + t.epochMetaBlockHash = []byte(hash) + + metaBuff, err := t.marshalizer.Marshal(meta) + if err != nil { + log.Debug("updateTriggerFromMeta marshal", "error", err.Error()) + continue + } + + epochStartIdentifier := core.EpochStartIdentifier(meta.Epoch) + err = t.metaHdrStorage.Put([]byte(epochStartIdentifier), metaBuff) + if err != nil { + log.Debug("updateTriggerMeta put into metaHdrStorage", "error", err.Error()) + continue + } + } + } +} + +// call only if mutex is locked before +func (t *trigger) isMetaBlockValid(_ string, metaHdr *block.MetaBlock) bool { + currHdr := metaHdr + for i := metaHdr.Nonce - 1; i >= metaHdr.Nonce-t.validity; i-- { + neededHdr, err := t.getHeaderWithNonceAndHash(i, currHdr.PrevHash) + if err != nil { + return false + } + + err = t.headerValidator.IsHeaderConstructionValid(currHdr, neededHdr) + if err != nil { + return false + } + } + + return true +} + +func (t *trigger) isMetaBlockFinal(_ string, metaHdr *block.MetaBlock) (bool, uint64) { + nextBlocksVerified := uint64(0) + finalityAttestingRound := metaHdr.Round + currHdr := metaHdr + for nonce := metaHdr.Nonce + 1; nonce <= metaHdr.Nonce+t.finality; nonce++ { + currHash, err := core.CalculateHash(t.marshalizer, t.hasher, currHdr) + if err != nil { + continue + } + + neededHdr, err := t.getHeaderWithNonceAndPrevHash(nonce, currHash) + if err != nil { + continue + } + + err = t.headerValidator.IsHeaderConstructionValid(neededHdr, currHdr) + if err != nil { + continue + } + + currHdr = neededHdr + + finalityAttestingRound = currHdr.GetRound() + nextBlocksVerified += 1 + } + + if nextBlocksVerified < t.finality { + for nonce := currHdr.Nonce + 1; nonce <= currHdr.Nonce+t.finality; nonce++ { + go t.requestHandler.RequestMetaHeaderByNonce(nonce) + } + return false, 0 + } + + return true, finalityAttestingRound +} + +// call only if mutex is locked before +func (t *trigger) checkIfTriggerCanBeActivated(hash string, metaHdr *block.MetaBlock) (bool, uint64) { + isMetaHdrValid := t.isMetaBlockValid(hash, metaHdr) + if !isMetaHdrValid { + return false, 0 + } + + isMetaHdrFinal, finalityAttestingRound := t.isMetaBlockFinal(hash, metaHdr) + return isMetaHdrFinal, finalityAttestingRound +} + +// call only if mutex is locked before +func (t *trigger) getHeaderWithNonceAndHashFromMaps(nonce uint64, neededHash []byte) *block.MetaBlock { + metaHdrHashesWithNonce := t.mapNonceHashes[nonce] + for _, hash := range metaHdrHashesWithNonce { + if !bytes.Equal(neededHash, []byte(hash)) { + continue + } + + neededHdr := t.mapHashHdr[hash] + if neededHdr != nil { + return neededHdr + } + } + + return nil +} + +// call only if mutex is locked before +func (t *trigger) getHeaderWithHashFromPool(neededHash []byte) *block.MetaBlock { + peekedData, _ := t.headersPool.GetHeaderByHash(neededHash) + neededHdr, ok := peekedData.(*block.MetaBlock) + if ok { + t.mapHashHdr[string(neededHash)] = neededHdr + t.mapNonceHashes[neededHdr.Nonce] = append(t.mapNonceHashes[neededHdr.Nonce], string(neededHash)) + return neededHdr + } + + return nil +} + +// call only if mutex is locked before +func (t *trigger) getHeaderWithHashFromStorage(neededHash []byte) *block.MetaBlock { + storageData, err := t.metaHdrStorage.Get(neededHash) + if err == nil { + var neededHdr block.MetaBlock + err = t.marshalizer.Unmarshal(&neededHdr, storageData) + if err == nil { + t.mapHashHdr[string(neededHash)] = &neededHdr + t.mapNonceHashes[neededHdr.Nonce] = append(t.mapNonceHashes[neededHdr.Nonce], 
string(neededHash)) + return &neededHdr + } + } + + return nil +} + +// call only if mutex is locked before +func (t *trigger) getHeaderWithNonceAndHash(nonce uint64, neededHash []byte) (*block.MetaBlock, error) { + metaHdr := t.getHeaderWithNonceAndHashFromMaps(nonce, neededHash) + if metaHdr != nil { + return metaHdr, nil + } + + metaHdr = t.getHeaderWithHashFromPool(neededHash) + if metaHdr != nil { + return metaHdr, nil + } + + metaHdr = t.getHeaderWithHashFromStorage(neededHash) + if metaHdr != nil { + return metaHdr, nil + } + + go t.requestHandler.RequestMetaHeader(neededHash) + + return nil, epochStart.ErrMetaHdrNotFound +} + +// call only if mutex is locked before +func (t *trigger) getHeaderWithNonceAndPrevHashFromMaps(nonce uint64, prevHash []byte) *block.MetaBlock { + metaHdrHashesWithNonce := t.mapNonceHashes[nonce] + for _, hash := range metaHdrHashesWithNonce { + hdrWithNonce := t.mapHashHdr[hash] + if hdrWithNonce != nil && bytes.Equal(hdrWithNonce.PrevHash, prevHash) { + return hdrWithNonce + } + } + return nil +} + +// call only if mutex is locked before +func (t *trigger) getHeaderWithNonceAndPrevHashFromCache(nonce uint64, prevHash []byte) *block.MetaBlock { + headers, hashes, err := t.headersPool.GetHeadersByNonceAndShardId(nonce, sharding.MetachainShardId) + if err != nil { + return nil + } + + for i, header := range headers { + if !bytes.Equal(header.GetPrevHash(), prevHash) { + continue + } + + hdrWithNonce, ok := header.(*block.MetaBlock) + if !ok { + continue + } + + t.mapHashHdr[string(hashes[i])] = hdrWithNonce + t.mapNonceHashes[hdrWithNonce.Nonce] = append(t.mapNonceHashes[hdrWithNonce.Nonce], string(hashes[i])) + return hdrWithNonce + } + + return nil +} + +// call only if mutex is locked before +func (t *trigger) getHeaderWithNonceAndPrevHash(nonce uint64, prevHash []byte) (*block.MetaBlock, error) { + metaHdr := t.getHeaderWithNonceAndPrevHashFromMaps(nonce, prevHash) + if metaHdr != nil { + return metaHdr, nil + } + + metaHdr = t.getHeaderWithNonceAndPrevHashFromCache(nonce, prevHash) + if metaHdr != nil { + return metaHdr, nil + } + + nonceToByteSlice := t.uint64Converter.ToByteSlice(nonce) + dataHdr, err := t.metaNonceHdrStorage.Get(nonceToByteSlice) + if err != nil || len(dataHdr) == 0 { + go t.requestHandler.RequestMetaHeaderByNonce(nonce) + return nil, err + } + + var neededHash []byte + err = t.marshalizer.Unmarshal(&neededHash, dataHdr) + if err != nil { + return nil, err + } + + return t.getHeaderWithNonceAndHash(nonce, neededHash) +} + +// SetProcessed sets start of epoch to false and cleans underlying structure +func (t *trigger) SetProcessed(header data.HeaderHandler) { + t.mutTrigger.Lock() + defer t.mutTrigger.Unlock() + + shardHdr, ok := header.(*block.Header) + if !ok { + return + } + + if !shardHdr.IsStartOfEpochBlock() { + return + } + + t.isEpochStart = false + t.newEpochHdrReceived = false + t.epochMetaBlockHash = shardHdr.EpochStartMetaHash + + t.epochStartNotifier.NotifyAll(shardHdr) + + t.mapHashHdr = make(map[string]*block.MetaBlock) + t.mapNonceHashes = make(map[uint64][]string) + t.mapEpochStartHdrs = make(map[string]*block.MetaBlock) +} + +// Revert sets the start of epoch back to true +func (t *trigger) Revert() { + t.mutTrigger.Lock() + defer t.mutTrigger.Unlock() + + t.isEpochStart = true + t.newEpochHdrReceived = true +} + +// EpochStartMetaHdrHash returns the announcing meta header hash which created the new epoch +func (t *trigger) EpochStartMetaHdrHash() []byte { + t.mutTrigger.RLock() + defer t.mutTrigger.RUnlock() + + 
return t.epochMetaBlockHash +} + +// Update updates the end-of-epoch trigger +func (t *trigger) Update(_ uint64) { +} + +// SetFinalityAttestingRound sets the round which finalized the start of epoch block +func (t *trigger) SetFinalityAttestingRound(_ uint64) { +} + +// SetCurrentEpochStartRound sets the round when the current epoch started +func (t *trigger) SetCurrentEpochStartRound(_ uint64) { +} + +// IsInterfaceNil returns true if underlying object is nil +func (t *trigger) IsInterfaceNil() bool { + return t == nil +} diff --git a/epochStart/shardchain/trigger_test.go b/epochStart/shardchain/trigger_test.go new file mode 100644 index 00000000000..7ea36fe7b25 --- /dev/null +++ b/epochStart/shardchain/trigger_test.go @@ -0,0 +1,273 @@ +package shardchain + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/epochStart/mock" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/stretchr/testify/assert" +) + +func createMockShardEpochStartTriggerArguments() *ArgsShardEpochStartTrigger { + return &ArgsShardEpochStartTrigger{ + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &mock.HasherMock{}, + HeaderValidator: &mock.HeaderValidatorStub{ + IsHeaderConstructionValidCalled: func(currHdr, prevHdr data.HeaderHandler) error { + return nil + }, + }, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + DataPool: &mock.PoolsHolderStub{ + HeadersCalled: func() dataRetriever.HeadersPool { + return &mock.HeadersCacherStub{} + }, + }, + Storage: &mock.ChainStorerStub{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &mock.StorerStub{ + GetCalled: func(key []byte) (bytes []byte, err error) { + return []byte("hash"), nil + }, + PutCalled: func(key, data []byte) error { + return nil + }, + } + }, + }, + RequestHandler: &mock.RequestHandlerStub{}, + EpochStartNotifier: &mock.EpochStartNotifierStub{}, + } +} + +func TestNewEpochStartTrigger_NilArgumentsShouldErr(t *testing.T) { + t.Parallel() + + epochStartTrigger, err := NewEpochStartTrigger(nil) + + assert.Nil(t, epochStartTrigger) + assert.Equal(t, epochStart.ErrNilArgsNewShardEpochStartTrigger, err) +} + +func TestNewEpochStartTrigger_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + args := createMockShardEpochStartTriggerArguments() + args.Hasher = nil + epochStartTrigger, err := NewEpochStartTrigger(args) + + assert.Nil(t, epochStartTrigger) + assert.Equal(t, epochStart.ErrNilHasher, err) +} + +func TestNewEpochStartTrigger_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + args := createMockShardEpochStartTriggerArguments() + args.Marshalizer = nil + epochStartTrigger, err := NewEpochStartTrigger(args) + + assert.Nil(t, epochStartTrigger) + assert.Equal(t, epochStart.ErrNilMarshalizer, err) +} + +func TestNewEpochStartTrigger_NilHeaderShouldErr(t *testing.T) { + t.Parallel() + + args := createMockShardEpochStartTriggerArguments() + args.HeaderValidator = nil + epochStartTrigger, err := NewEpochStartTrigger(args) + + assert.Nil(t, epochStartTrigger) + assert.Equal(t, epochStart.ErrNilHeaderValidator, err) +} + +func TestNewEpochStartTrigger_NilDataPoolShouldErr(t *testing.T) { + t.Parallel() + + args := createMockShardEpochStartTriggerArguments() + args.DataPool = nil + epochStartTrigger, err := NewEpochStartTrigger(args) + + assert.Nil(t, 
epochStartTrigger) + assert.Equal(t, epochStart.ErrNilDataPoolsHolder, err) +} + +func TestNewEpochStartTrigger_NilStorageShouldErr(t *testing.T) { + t.Parallel() + + args := createMockShardEpochStartTriggerArguments() + args.Storage = nil + epochStartTrigger, err := NewEpochStartTrigger(args) + + assert.Nil(t, epochStartTrigger) + assert.Equal(t, epochStart.ErrNilStorageService, err) +} + +func TestNewEpochStartTrigger_NilRequestHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := createMockShardEpochStartTriggerArguments() + args.RequestHandler = nil + epochStartTrigger, err := NewEpochStartTrigger(args) + + assert.Nil(t, epochStartTrigger) + assert.Equal(t, epochStart.ErrNilRequestHandler, err) +} + +func TestNewEpochStartTrigger_NilUint64ConverterShouldErr(t *testing.T) { + t.Parallel() + + args := createMockShardEpochStartTriggerArguments() + args.Uint64Converter = nil + epochStartTrigger, err := NewEpochStartTrigger(args) + + assert.Nil(t, epochStartTrigger) + assert.Equal(t, epochStart.ErrNilUint64Converter, err) +} + +func TestNewEpochStartTrigger_NilEpochStartNotifierShouldErr(t *testing.T) { + t.Parallel() + + args := createMockShardEpochStartTriggerArguments() + args.EpochStartNotifier = nil + epochStartTrigger, err := NewEpochStartTrigger(args) + + assert.Nil(t, epochStartTrigger) + assert.Equal(t, epochStart.ErrNilEpochStartNotifier, err) +} + +func TestNewEpochStartTrigger_NilMetaBlockUnitShouldErr(t *testing.T) { + t.Parallel() + + args := createMockShardEpochStartTriggerArguments() + args.Storage = &mock.ChainStorerStub{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return nil + }, + } + epochStartTrigger, err := NewEpochStartTrigger(args) + + assert.Nil(t, epochStartTrigger) + assert.Equal(t, epochStart.ErrNilMetaHdrStorage, err) +} + +func TestNewEpochStartTrigger_NilMetaNonceHashStorageShouldErr(t *testing.T) { + t.Parallel() + + args := createMockShardEpochStartTriggerArguments() + args.Storage = &mock.ChainStorerStub{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + switch unitType { + case dataRetriever.MetaHdrNonceHashDataUnit: + return nil + default: + return &mock.StorerStub{} + } + }, + } + epochStartTrigger, err := NewEpochStartTrigger(args) + + assert.Nil(t, epochStartTrigger) + assert.Equal(t, epochStart.ErrNilMetaNonceHashStorage, err) +} + +func TestNewEpochStartTrigger_ShouldOk(t *testing.T) { + t.Parallel() + + args := createMockShardEpochStartTriggerArguments() + epochStartTrigger, err := NewEpochStartTrigger(args) + + assert.NotNil(t, epochStartTrigger) + assert.Nil(t, err) +} + +func TestTrigger_ReceivedHeaderNotEpochStart(t *testing.T) { + t.Parallel() + + args := createMockShardEpochStartTriggerArguments() + args.Validity = 2 + args.Finality = 2 + epochStartTrigger, _ := NewEpochStartTrigger(args) + + hash := []byte("hash") + header := &block.MetaBlock{Nonce: 100} + header.EpochStart.LastFinalizedHeaders = []block.EpochStartShardData{{ShardId: 0, RootHash: hash, HeaderHash: hash}} + epochStartTrigger.ReceivedHeader(header) + + assert.False(t, epochStartTrigger.IsEpochStart()) +} + +func TestTrigger_ReceivedHeaderIsEpochStartTrue(t *testing.T) { + t.Parallel() + + args := createMockShardEpochStartTriggerArguments() + args.Validity = 1 + args.Finality = 2 + epochStartTrigger, _ := NewEpochStartTrigger(args) + + oldEpHeader := &block.MetaBlock{Nonce: 99, Epoch: 0} + prevHash, _ := core.CalculateHash(args.Marshalizer, args.Hasher, oldEpHeader) + + hash := []byte("hash") + header := 
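The nil-argument constructor tests above all share one shape: build valid arguments, nil a single field, expect the matching sentinel error. Under that reading, a table-driven form is an equivalent, more compact sketch; it is illustrative only and reuses the helpers, imports, and error values already shown in this file:

func TestNewEpochStartTrigger_NilFieldsTableDriven(t *testing.T) {
	t.Parallel()

	cases := []struct {
		name        string
		mutate      func(args *ArgsShardEpochStartTrigger)
		expectedErr error
	}{
		{"nil hasher", func(args *ArgsShardEpochStartTrigger) { args.Hasher = nil }, epochStart.ErrNilHasher},
		{"nil marshalizer", func(args *ArgsShardEpochStartTrigger) { args.Marshalizer = nil }, epochStart.ErrNilMarshalizer},
		{"nil request handler", func(args *ArgsShardEpochStartTrigger) { args.RequestHandler = nil }, epochStart.ErrNilRequestHandler},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			args := createMockShardEpochStartTriggerArguments()
			c.mutate(args)

			epochStartTrigger, err := NewEpochStartTrigger(args)
			assert.Nil(t, epochStartTrigger)
			assert.Equal(t, c.expectedErr, err)
		})
	}
}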
&block.MetaBlock{Nonce: 100, Epoch: 1, PrevHash: prevHash} + header.EpochStart.LastFinalizedHeaders = []block.EpochStartShardData{{ShardId: 0, RootHash: hash, HeaderHash: hash}} + epochStartTrigger.ReceivedHeader(header) + epochStartTrigger.ReceivedHeader(oldEpHeader) + + prevHash, _ = core.CalculateHash(args.Marshalizer, args.Hasher, header) + header = &block.MetaBlock{Nonce: 101, Epoch: 1, PrevHash: prevHash} + epochStartTrigger.ReceivedHeader(header) + + prevHash, _ = core.CalculateHash(args.Marshalizer, args.Hasher, header) + header = &block.MetaBlock{Nonce: 102, Epoch: 1, PrevHash: prevHash} + epochStartTrigger.ReceivedHeader(header) + + assert.True(t, epochStartTrigger.IsEpochStart()) +} + +func TestTrigger_Epoch(t *testing.T) { + t.Parallel() + + epoch := uint32(1) + args := createMockShardEpochStartTriggerArguments() + args.Epoch = epoch + epochStartTrigger, _ := NewEpochStartTrigger(args) + + currentEpoch := epochStartTrigger.Epoch() + assert.Equal(t, epoch, currentEpoch) +} + +func TestTrigger_ProcessedAndRevert(t *testing.T) { + t.Parallel() + + args := createMockShardEpochStartTriggerArguments() + args.Validity = 0 + args.Finality = 0 + args.EpochStartNotifier = &mock.EpochStartNotifierStub{NotifyAllCalled: func(hdr data.HeaderHandler) {}} + et, _ := NewEpochStartTrigger(args) + + hash := []byte("hash") + epochStartRound := uint64(100) + header := &block.MetaBlock{Nonce: 100, Round: epochStartRound, Epoch: 1} + header.EpochStart.LastFinalizedHeaders = []block.EpochStartShardData{{ShardId: 0, RootHash: hash, HeaderHash: hash}} + et.ReceivedHeader(header) + header = &block.MetaBlock{Nonce: 101, Round: epochStartRound + 1, Epoch: 1} + et.ReceivedHeader(header) + + assert.True(t, et.IsEpochStart()) + assert.Equal(t, epochStartRound, et.EpochStartRound()) + + et.SetProcessed(&block.Header{EpochStartMetaHash: []byte("metahash")}) + assert.False(t, et.isEpochStart) + assert.False(t, et.newEpochHdrReceived) + + et.Revert() + assert.True(t, et.isEpochStart) + assert.True(t, et.newEpochHdrReceived) +} diff --git a/facade/elrondNodeFacade.go b/facade/elrondNodeFacade.go index 18b45b3ba09..016b6317f25 100644 --- a/facade/elrondNodeFacade.go +++ b/facade/elrondNodeFacade.go @@ -82,12 +82,6 @@ func (ef *ElrondNodeFacade) StartNode() error { return err } -// GetCurrentPublicKey is just a mock method to satisfies FacadeHandler -//TODO: Remove this method when it will not be used in elrond facade -func (ef *ElrondNodeFacade) GetCurrentPublicKey() string { - return "" -} - // StartBackgroundServices starts all background services needed for the correct functionality of the node func (ef *ElrondNodeFacade) StartBackgroundServices() { go ef.startRest() @@ -117,21 +111,6 @@ func (ef *ElrondNodeFacade) RestApiInterface() string { return ef.config.RestApiInterface } -// PrometheusMonitoring returns if prometheus is enabled for monitoring by the flag -func (ef *ElrondNodeFacade) PrometheusMonitoring() bool { - return ef.config.Prometheus -} - -// PrometheusJoinURL will return the join URL from server.toml -func (ef *ElrondNodeFacade) PrometheusJoinURL() string { - return ef.config.PrometheusJoinURL -} - -// PrometheusNetworkID will return the NetworkID from config.toml or the flag -func (ef *ElrondNodeFacade) PrometheusNetworkID() string { - return ef.config.PrometheusJobName -} - func (ef *ElrondNodeFacade) startRest() { log.Trace("starting REST api server") @@ -163,12 +142,11 @@ func (ef *ElrondNodeFacade) CreateTransaction( senderHex string, gasPrice uint64, gasLimit uint64, - data string, + txData 
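TestTrigger_ProcessedAndRevert (below in this file) exercises the intended lifecycle: ReceivedHeader flips the trigger into epoch-start state, SetProcessed clears it and notifies subscribers once the start-of-epoch shard block is committed, and Revert restores it on rollback. A hedged sketch of how a block processor might drive that cycle; the processor flow and commit callback are assumptions, only the trigger methods and header types come from this change:

package example

import (
	"github.com/ElrondNetwork/elrond-go/data"
	"github.com/ElrondNetwork/elrond-go/data/block"
)

// epochStartTriggerHandler lists only the trigger methods used in this sketch;
// the concrete *trigger added by this PR provides all of them.
type epochStartTriggerHandler interface {
	IsEpochStart() bool
	EpochStartMetaHdrHash() []byte
	SetProcessed(header data.HeaderHandler)
	Revert()
}

// commitShardBlock is a hypothetical processing step, not code from this PR.
func commitShardBlock(trig epochStartTriggerHandler, hdr *block.Header, commit func(*block.Header) error) error {
	isEpochStartBlock := trig.IsEpochStart()
	if isEpochStartBlock {
		// the proposer stamps the announcing meta header hash on the shard block
		hdr.EpochStartMetaHash = trig.EpochStartMetaHdrHash()
	}

	if err := commit(hdr); err != nil {
		if isEpochStartBlock {
			trig.Revert() // rollback: the epoch change is still pending
		}
		return err
	}

	trig.SetProcessed(hdr) // clears the flag and notifies; returns early for ordinary blocks
	return nil
}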
[]byte, signatureHex string, - challenge string, ) (*transaction.Transaction, error) { - return ef.node.CreateTransaction(nonce, value, receiverHex, senderHex, gasPrice, gasLimit, data, signatureHex, challenge) + return ef.node.CreateTransaction(nonce, value, receiverHex, senderHex, gasPrice, gasLimit, txData, signatureHex) } // ValidatorStatisticsApi will return the statistics for all validators @@ -184,11 +162,11 @@ func (ef *ElrondNodeFacade) SendTransaction( value string, gasPrice uint64, gasLimit uint64, - transactionData string, + txData []byte, signature []byte, ) (string, error) { - return ef.node.SendTransaction(nonce, senderHex, receiverHex, value, gasPrice, gasLimit, transactionData, signature) + return ef.node.SendTransaction(nonce, senderHex, receiverHex, value, gasPrice, gasLimit, txData, signature) } // SendBulkTransactions will send a bulk of transactions on the topic channel diff --git a/facade/elrondNodeFacade_test.go b/facade/elrondNodeFacade_test.go index 3a93cdab6c8..7343877179e 100644 --- a/facade/elrondNodeFacade_test.go +++ b/facade/elrondNodeFacade_test.go @@ -224,12 +224,12 @@ func TestElrondNodeFacade_SetSyncer(t *testing.T) { func TestElrondNodeFacade_SendTransaction(t *testing.T) { called := 0 node := &mock.NodeMock{} - node.SendTransactionHandler = func(nonce uint64, sender string, receiver string, amount string, code string, signature []byte) (string, error) { + node.SendTransactionHandler = func(nonce uint64, sender string, receiver string, amount string, txData []byte, signature []byte) (string, error) { called++ return "", nil } ef := createElrondNodeFacadeWithMockResolver(node) - _, _ = ef.SendTransaction(1, "test", "test", "0", 0, 0, "code", []byte{}) + _, _ = ef.SendTransaction(1, "test", "test", "0", 0, 0, []byte("code"), []byte{}) assert.Equal(t, called, 1) } diff --git a/facade/interface.go b/facade/interface.go index 3e8c174b720..463b177b09e 100644 --- a/facade/interface.go +++ b/facade/interface.go @@ -30,10 +30,10 @@ type NodeWrapper interface { //CreateTransaction will return a transaction from all needed fields CreateTransaction(nonce uint64, value string, receiverHex string, senderHex string, gasPrice uint64, - gasLimit uint64, data string, signatureHex string, challenge string) (*transaction.Transaction, error) + gasLimit uint64, data []byte, signatureHex string) (*transaction.Transaction, error) //SendTransaction will send a new transaction on the 'send transactions pipe' channel - SendTransaction(nonce uint64, senderHex string, receiverHex string, value string, gasPrice uint64, gasLimit uint64, transactionData string, signature []byte) (string, error) + SendTransaction(nonce uint64, senderHex string, receiverHex string, value string, gasPrice uint64, gasLimit uint64, transactionData []byte, signature []byte) (string, error) //SendBulkTransactions will send a bulk of transactions on the 'send transactions pipe' channel SendBulkTransactions(txs []*transaction.Transaction) (uint64, error) diff --git a/facade/mock/nodeMock.go b/facade/mock/nodeMock.go index cd904981646..4c90c55d049 100644 --- a/facade/mock/nodeMock.go +++ b/facade/mock/nodeMock.go @@ -19,9 +19,9 @@ type NodeMock struct { GetBalanceHandler func(address string) (*big.Int, error) GenerateTransactionHandler func(sender string, receiver string, amount string, code string) (*transaction.Transaction, error) CreateTransactionHandler func(nonce uint64, value string, receiverHex string, senderHex string, gasPrice uint64, - gasLimit uint64, data string, signatureHex string, challenge 
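With the signature changes above, the transaction payload travels through the facade as raw bytes and the unused challenge parameter is removed. A hedged caller-side sketch against the new signatures; the facade instance, addresses, and values are placeholders, only the parameter order and types come from this change:

package example

import "github.com/ElrondNetwork/elrond-go/facade"

// submitTx is an illustrative caller; every literal below is a placeholder.
func submitTx(ef *facade.ElrondNodeFacade) (string, error) {
	tx, err := ef.CreateTransaction(
		1,                    // nonce
		"10",                 // value
		"receiverAddressHex", // receiverHex
		"senderAddressHex",   // senderHex
		1000000000,           // gasPrice
		100000,               // gasLimit
		[]byte("tx payload"), // txData: now raw bytes instead of a string
		"signatureHex",       // signatureHex (the old trailing `challenge` parameter is gone)
	)
	if err != nil {
		return "", err
	}
	_ = tx // the created transaction could be inspected or persisted here

	return ef.SendTransaction(
		1, "senderAddressHex", "receiverAddressHex", "10",
		1000000000, 100000, []byte("tx payload"), []byte("signature"),
	)
}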
string) (*transaction.Transaction, error) + gasLimit uint64, data []byte, signatureHex string) (*transaction.Transaction, error) GetTransactionHandler func(hash string) (*transaction.Transaction, error) - SendTransactionHandler func(nonce uint64, sender string, receiver string, amount string, code string, signature []byte) (string, error) + SendTransactionHandler func(nonce uint64, sender string, receiver string, amount string, txData []byte, signature []byte) (string, error) SendBulkTransactionsHandler func(txs []*transaction.Transaction) (uint64, error) GetAccountHandler func(address string) (*state.Account, error) GetCurrentPublicKeyHandler func() string @@ -64,16 +64,16 @@ func (nm *NodeMock) GenerateTransaction(sender string, receiver string, amount s } func (nm *NodeMock) CreateTransaction(nonce uint64, value string, receiverHex string, senderHex string, gasPrice uint64, - gasLimit uint64, data string, signatureHex string, challenge string) (*transaction.Transaction, error) { + gasLimit uint64, data []byte, signatureHex string) (*transaction.Transaction, error) { - return nm.CreateTransactionHandler(nonce, value, receiverHex, senderHex, gasPrice, gasLimit, data, signatureHex, challenge) + return nm.CreateTransactionHandler(nonce, value, receiverHex, senderHex, gasPrice, gasLimit, data, signatureHex) } func (nm *NodeMock) GetTransaction(hash string) (*transaction.Transaction, error) { return nm.GetTransactionHandler(hash) } -func (nm *NodeMock) SendTransaction(nonce uint64, sender string, receiver string, value string, gasPrice uint64, gasLimit uint64, transactionData string, signature []byte) (string, error) { +func (nm *NodeMock) SendTransaction(nonce uint64, sender string, receiver string, value string, gasPrice uint64, gasLimit uint64, transactionData []byte, signature []byte) (string, error) { return nm.SendTransactionHandler(nonce, sender, receiver, value, transactionData, signature) } diff --git a/go.mod b/go.mod index fefd896a048..e8b9c873552 100644 --- a/go.mod +++ b/go.mod @@ -4,16 +4,16 @@ go 1.12 require ( github.com/360EntSecGroup-Skylar/excelize v1.4.1 - github.com/ElrondNetwork/arwen-wasm-vm v0.3.2 + github.com/ElrondNetwork/arwen-wasm-vm v0.0.0-20191227132001-161cbe6d664b github.com/ElrondNetwork/concurrent-map v0.1.2 github.com/ElrondNetwork/elrond-vm v0.0.24 github.com/ElrondNetwork/elrond-vm-common v0.1.6 github.com/beevik/ntp v0.2.0 github.com/boltdb/bolt v1.3.1 - github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c + github.com/btcsuite/btcd v0.20.1-beta github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d github.com/cornelk/hashmap v1.0.1-0.20190121140111-33e58823eb9d - github.com/dgraph-io/badger v1.6.0-rc1 + github.com/dgraph-io/badger v1.6.0 github.com/elastic/go-elasticsearch/v7 v7.1.0 github.com/gin-contrib/cors v0.0.0-20190301062745-f9e10995c85a github.com/gin-contrib/pprof v1.2.0 @@ -22,38 +22,36 @@ require ( github.com/glycerine/go-capnproto v0.0.0-20190118050403-2d07de3aa7fc github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 // indirect github.com/glycerine/rbtree v0.0.0-20180524195614-80eebfe947f7 // indirect - github.com/gogo/protobuf v1.3.0 - github.com/golang/protobuf v1.3.1 + github.com/gogo/protobuf v1.3.1 + github.com/golang/protobuf v1.3.2 github.com/google/gops v0.3.6 github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c // indirect - github.com/gorilla/websocket v1.4.0 + github.com/gorilla/websocket v1.4.1 github.com/hashicorp/golang-lru v0.5.3 github.com/ipfs/go-log v0.0.1 github.com/jbenet/goprocess 
v0.1.3 github.com/jtolds/gls v4.20.0+incompatible // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/libp2p/go-libp2p v0.3.1 - github.com/libp2p/go-libp2p-core v0.2.2 - github.com/libp2p/go-libp2p-discovery v0.1.0 - github.com/libp2p/go-libp2p-kad-dht v0.2.1 - github.com/libp2p/go-libp2p-kbucket v0.2.1 - github.com/libp2p/go-libp2p-pubsub v0.1.1 - github.com/minio/sha256-simd v0.1.0 - github.com/mr-tron/base58 v1.1.2 - github.com/multiformats/go-multiaddr v0.0.4 + github.com/libp2p/go-libp2p v0.5.0 + github.com/libp2p/go-libp2p-core v0.3.0 + github.com/libp2p/go-libp2p-discovery v0.2.0 + github.com/libp2p/go-libp2p-kad-dht v0.4.1 + github.com/libp2p/go-libp2p-kbucket v0.2.2 + github.com/libp2p/go-libp2p-pubsub v0.2.5 + github.com/minio/sha256-simd v0.1.1 + github.com/mr-tron/base58 v1.1.3 + github.com/multiformats/go-multiaddr v0.2.0 github.com/pelletier/go-toml v1.2.0 github.com/pkg/errors v0.8.1 github.com/pkg/profile v1.3.0 - github.com/prometheus/client_golang v1.0.0 github.com/satori/go.uuid v1.2.0 github.com/shirou/gopsutil v0.0.0-20190731134726-d80c43f9c984 github.com/stretchr/testify v1.4.0 github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 github.com/urfave/cli v1.20.0 - github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc + github.com/whyrusleeping/go-logging v0.0.1 github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee go.dedis.ch/kyber/v3 v3.0.7 - golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 - golang.org/x/sys v0.0.0-20190825160603-fb81701db80f // indirect + golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 gopkg.in/go-playground/validator.v8 v8.18.2 ) diff --git a/go.sum b/go.sum index 2adf82f4383..85964c4cead 100644 --- a/go.sum +++ b/go.sum @@ -6,35 +6,22 @@ github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkBy github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/ElrondNetwork/arwen-wasm-vm v0.0.0-20191128093743-6e3c28adc83f h1:Nolb0ayAvd6YK8wpjg6jZ87f4ZCMQY6tQK2fE6QShUE= -github.com/ElrondNetwork/arwen-wasm-vm v0.0.0-20191128093743-6e3c28adc83f/go.mod h1:yheUY3vVWupL8pe/P+W45Rb0V51sa3hRwslNlaJOxow= -github.com/ElrondNetwork/arwen-wasm-vm v0.0.0-20191129083049-4cf5d04347f4 h1:MEuEzVeyoCYTgI1ZpE4svb/bF0jnJQRV1qIlXUdA/Ys= -github.com/ElrondNetwork/arwen-wasm-vm v0.0.0-20191129083049-4cf5d04347f4/go.mod h1:MrMGsLuPpidpWNZZETK1qI8rppRRjIaeEk77CrpGylo= -github.com/ElrondNetwork/arwen-wasm-vm v0.0.0-20191129150346-a0d43c8d42a1 h1:mhR1v4dP7HzfxUZBf7K1Y++ATLAwBW1bJ3vpnAt56Ug= -github.com/ElrondNetwork/arwen-wasm-vm v0.0.0-20191129150346-a0d43c8d42a1/go.mod h1:MrMGsLuPpidpWNZZETK1qI8rppRRjIaeEk77CrpGylo= -github.com/ElrondNetwork/arwen-wasm-vm v0.3.2 h1:EZmX7euEvAPsCfQSaz4pwJaR/MiNDu5o6oLW+colV7U= -github.com/ElrondNetwork/arwen-wasm-vm v0.3.2/go.mod h1:5AlNE9+mwvuFF5FzGRceJKMKxpTQDlMKO6ZEPuNZBSA= +github.com/ElrondNetwork/arwen-wasm-vm v0.0.0-20191227132001-161cbe6d664b h1:s5kZKAg5kWwMsvASAtApZqObPpWs2MPGwmgZxqXy5zc= +github.com/ElrondNetwork/arwen-wasm-vm v0.0.0-20191227132001-161cbe6d664b/go.mod h1:5AlNE9+mwvuFF5FzGRceJKMKxpTQDlMKO6ZEPuNZBSA= github.com/ElrondNetwork/big-int-util v0.0.5 h1:e/9kK++9ZH/SdIYqLSUPRFYrDZmDWDgff3/7SCydq5I= github.com/ElrondNetwork/big-int-util v0.0.5/go.mod 
h1:96viBvoTXLjZOhEvE0D+QnAwg1IJLPAK6GVHMbC7Aw4= github.com/ElrondNetwork/concurrent-map v0.1.2 h1:mr2sVF2IPDsJO8DNGzCUiNQOJcadHuIRVZn+QFnCBlE= github.com/ElrondNetwork/concurrent-map v0.1.2/go.mod h1:3XwSwn4JHI0lrKxWLZvtp53Emr8BXYTmNQGwcukHJEE= -github.com/ElrondNetwork/elrond-vm v0.0.23 h1:ylwmNQMeRRJoJ8ujamvf9XAsmLAqcpMgqqNuV90J8eY= -github.com/ElrondNetwork/elrond-vm v0.0.23/go.mod h1:GfPeaG0Rp6K5wNiIiS54xQ547QxOAOgylUktZKTBA8E= github.com/ElrondNetwork/elrond-vm v0.0.24 h1:AbR35H7w0wZQt+607cqMmxTiyHNq/cr07d1Ty7vG+d8= github.com/ElrondNetwork/elrond-vm v0.0.24/go.mod h1:cqYk2wq8yndnrgOjB81R/QCruaC+F+hMf8R6C6Y0ewA= github.com/ElrondNetwork/elrond-vm-common v0.0.0-20191203115206-691b00a6e76a h1:/hGeyqQxE0E92BkMDs/1E6vRqJMA4nVhwSFol1dNopk= github.com/ElrondNetwork/elrond-vm-common v0.0.0-20191203115206-691b00a6e76a/go.mod h1:ZakxPST/Wt8umnRtA9gobcy3Dw2bywxwkC54P5VhO9g= github.com/ElrondNetwork/elrond-vm-common v0.0.9 h1:Ff8vEJSKChRfmp+TVo7AgciRkMXjL4+TbNin6LQ7xKw= github.com/ElrondNetwork/elrond-vm-common v0.0.9/go.mod h1:VqCCN0cX0e4D/KDc7MGNV9ElrOsfnjuJnGvcODVjzbk= -github.com/ElrondNetwork/elrond-vm-common v0.1.3 h1:b9KnmIDQx1Warrf2Hn9wtv5H2zMT0Qvq+bxulc++PUw= -github.com/ElrondNetwork/elrond-vm-common v0.1.3/go.mod h1:ZakxPST/Wt8umnRtA9gobcy3Dw2bywxwkC54P5VhO9g= -github.com/ElrondNetwork/elrond-vm-common v0.1.4 h1:bBkALg70U/tLXXRhoTTWT50QN7DSyq4VuJd7aiVWuQA= -github.com/ElrondNetwork/elrond-vm-common v0.1.4/go.mod h1:ZakxPST/Wt8umnRtA9gobcy3Dw2bywxwkC54P5VhO9g= github.com/ElrondNetwork/elrond-vm-common v0.1.5 h1:JRMK3tgLGFaHgMjOwvL+zzhXapv4GQ9G7bpNOWlxP8Y= github.com/ElrondNetwork/elrond-vm-common v0.1.5/go.mod h1:ZakxPST/Wt8umnRtA9gobcy3Dw2bywxwkC54P5VhO9g= github.com/ElrondNetwork/elrond-vm-common v0.1.6 h1:NaK6rTjCW20s7/X3JI/Ui4RSdBoAWb59UKSAFbJuGl8= github.com/ElrondNetwork/elrond-vm-common v0.1.6/go.mod h1:ZakxPST/Wt8umnRtA9gobcy3Dw2bywxwkC54P5VhO9g= -github.com/ElrondNetwork/elrond-vm-util v0.1.0/go.mod h1:2dLgpzmy1PkmtSVWjM2rEPoQkKv/y7+F2vMsDCRtHmg= github.com/ElrondNetwork/elrond-vm-util v0.1.1/go.mod h1:02LPKFh/Z5rbejgW2dazwjWGnsniuLOhRM2JjaOA3Mg= github.com/ElrondNetwork/go-ext-wasm v0.1.0 h1:aSXdg60JLQMSYdQbq0VtqpoyMdFYZqz09BcGiKwGY5U= github.com/ElrondNetwork/go-ext-wasm v0.1.0/go.mod h1:wlns4D0OzJP+q/cLweFz1rGpoZOG8LbGYX2jD31HCgw= @@ -45,18 +32,24 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIO github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/beevik/ntp v0.2.0 h1:sGsd+kAXzT0bfVfzJfce04g+dSRfrs+tbQW8lweuYgw= github.com/beevik/ntp v0.2.0/go.mod 
h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c h1:aEbSeNALREWXk0G7UdNhR3ayBV7tZ4M2PNmnrCAph6Q= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng= @@ -84,6 +77,8 @@ github.com/dchest/siphash v1.1.0/go.mod h1:q+IRvb2gOSrUnYoPqHiyHXS0FOBBOdl6tONBl github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= github.com/dgraph-io/badger v1.6.0-rc1 h1:JphPpoBZJ3WHha133BGYlQqltSGIhV+VsEID0++nN9A= github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= +github.com/dgraph-io/badger v1.6.0 h1:DshxFxZWXUcO0xX476VJC07Xsr6ZCBVRHKZ93Oh7Evo= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= @@ -112,7 +107,9 @@ github.com/glycerine/rbtree v0.0.0-20180524195614-80eebfe947f7/go.mod h1:tf1G9WL github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -120,6 +117,8 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0 
h1:G8O7TerXerS4F6sx9OV7/nRfJdnXgHZu/S/7F2SN+UE= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -128,6 +127,8 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -140,10 +141,14 @@ github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= @@ -159,21 +164,28 @@ github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUP github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3 h1:UIAh32wymBpStoe83YCzwVQQ5Oy/H0FdxvUS6DJDzms= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.4 h1:UlfXKrZx1DjZoBhQHmNHLC1fK1dUJDN20Y28A7s+gJ8= +github.com/ipfs/go-cid v0.0.4/go.mod 
h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.0 h1:TOxI04l8CmO4zGtesENhzm4PwkFwJXY3rKiYaaMf9fI= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= +github.com/ipfs/go-datastore v0.3.1 h1:SS1t869a6cctoSYmZXUk8eL6AzVXgASmKIWFNQkQ1jU= +github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= +github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= +github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-log v0.0.1 h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= -github.com/ipfs/go-todocounter v0.0.1 h1:kITWA5ZcQZfrUnDNkRn04Xzh0YFaDFXsoO2A81Eb6Lw= -github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4= +github.com/ipfs/go-todocounter v0.0.2 h1:9UBngSQhylg2UDcxSAtpkT+rEWFr26hDPXVStE8LFyc= +github.com/ipfs/go-todocounter v0.0.2/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4= github.com/jackpal/gateway v1.0.5 h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc= github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= github.com/jackpal/go-nat-pmp v1.0.1 h1:i0LektDkO1QlrTm/cSuP+PyBCDnYvjPLGl4LdWEMiaA= @@ -189,6 +201,7 @@ github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZl github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -206,8 +219,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b 
h1:wxtKgYHEncAU00muMD06dzLiahtGM1eouRNOzVV7tdQ= -github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d h1:68u9r4wEvL3gYg2jvAOgROwZ3H+Y3hIDk4tbbmIjcYQ= +github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -221,63 +234,77 @@ github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOS github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/libp2p/go-conn-security-multistream v0.1.0 h1:aqGmto+ttL/uJgX0JtQI0tD21CIEy5eYd1Hlp0juHY0= github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= -github.com/libp2p/go-eventbus v0.0.2/go.mod h1:Hr/yGlwxA/stuLnpMiu82lpNKpvRy3EaJxPu40XYOwk= github.com/libp2p/go-eventbus v0.1.0 h1:mlawomSAjjkk97QnYiEmHsLu7E136+2oCWSHRUvMfzQ= github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= github.com/libp2p/go-flow-metrics v0.0.1 h1:0gxuFd2GuK7IIP5pKljLwps6TvcuYgvG7Atqi3INF5s= github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= -github.com/libp2p/go-libp2p v0.3.1 h1:opd8/1Sm9zFG37LzNQsIzMTMeBabhlcX5VlvLrNZPV0= -github.com/libp2p/go-libp2p v0.3.1/go.mod h1:e6bwxbdYH1HqWTz8faTChKGR0BjPc8p+6SyP8GTTR7Y= -github.com/libp2p/go-libp2p-autonat v0.1.0 h1:aCWAu43Ri4nU0ZPO7NyLzUvvfqd0nE3dX0R/ZGYVgOU= -github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= +github.com/libp2p/go-flow-metrics v0.0.2/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= +github.com/libp2p/go-flow-metrics v0.0.3 h1:8tAs/hSdNvUiLgtlSy3mxwxWP4I9y/jlkPFT7epKdeM= +github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= +github.com/libp2p/go-libp2p v0.4.2/go.mod h1:MNmgUxUw5pMsdOzMlT0EE7oKjRasl+WyVwM0IBlpKgQ= +github.com/libp2p/go-libp2p v0.5.0 h1:/nnb5mc2TK6TwknECsWIkfCwMTHv0AXbvzxlnVivfeg= +github.com/libp2p/go-libp2p v0.5.0/go.mod h1:Os7a5Z3B+ErF4v7zgIJ7nBHNu2LYt8ZMLkTQUB3G/wA= +github.com/libp2p/go-libp2p-autonat v0.1.1 h1:WLBZcIRsjZlWdAZj9CiBSvU2wQXoUOiS1Zk1tM7DTJI= +github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= -github.com/libp2p/go-libp2p-blankhost v0.1.3 h1:0KycuXvPDhmehw0ASsg+s1o3IfXgCUDqfzAl94KEBOg= -github.com/libp2p/go-libp2p-blankhost v0.1.3/go.mod h1:KML1//wiKR8vuuJO0y3LUd1uLv+tlkGTAr3jC0S5cLg= -github.com/libp2p/go-libp2p-circuit v0.1.1 h1:eopfG9fAg6rEHWQO1TSrLosXDgYbbbu/RTva/tBANus= -github.com/libp2p/go-libp2p-circuit v0.1.1/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= +github.com/libp2p/go-libp2p-blankhost v0.1.4 h1:I96SWjR4rK9irDHcHq3XHN6hawCRTPUADzkJacgZLvk= +github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= +github.com/libp2p/go-libp2p-circuit v0.1.4 h1:Phzbmrg3BkVzbqd4ZZ149JxCuUWu2wZcXf/Kr6hZJj8= +github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= github.com/libp2p/go-libp2p-core v0.0.1/go.mod 
h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I= -github.com/libp2p/go-libp2p-core v0.0.6/go.mod h1:0d9xmaYAVY5qmbp/fcgxHT3ZJsLjYeYPMJAUKpaCHrE= github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= github.com/libp2p/go-libp2p-core v0.2.2 h1:Sv1ggdoMx9c7v7FOFkR7agraHCnAgqYsXrU1ARSRUMs= github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= +github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= +github.com/libp2p/go-libp2p-core v0.2.5/go.mod h1:6+5zJmKhsf7yHn1RbmYDu08qDUpIUxGdqHuEZckmZOA= +github.com/libp2p/go-libp2p-core v0.3.0 h1:F7PqduvrztDtFsAa/bcheQ3azmNo+Nq7m8hQY5GiUW8= +github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= github.com/libp2p/go-libp2p-discovery v0.1.0 h1:j+R6cokKcGbnZLf4kcNwpx6mDEUPF3N6SrqMymQhmvs= github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= -github.com/libp2p/go-libp2p-kad-dht v0.2.1 h1:+pb1DCkV/6oNQjTZVXl+Y++eV0rnelx/L8y1t4J+Rnw= -github.com/libp2p/go-libp2p-kad-dht v0.2.1/go.mod h1:k7ONOlup7HKzQ68dE6lSnp07cdxdkmnRa+6B4Fh9/w0= -github.com/libp2p/go-libp2p-kbucket v0.2.1 h1:q9Jfwww9XnXc1K9dyYuARJxJvIvhgYVaQCuziO/dF3c= -github.com/libp2p/go-libp2p-kbucket v0.2.1/go.mod h1:/Rtu8tqbJ4WQ2KTCOMJhggMukOLNLNPY1EtEWWLxUvc= +github.com/libp2p/go-libp2p-discovery v0.2.0 h1:1p3YSOq7VsgaL+xVHPi8XAmtGyas6D2J6rWBEfz/aiY= +github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= +github.com/libp2p/go-libp2p-kad-dht v0.4.1 h1:N++/IVD7KemtNqwoqBLsmpc1PxROW1cxi81ja+wsJCg= +github.com/libp2p/go-libp2p-kad-dht v0.4.1/go.mod h1:Qf5Ddk5Csgi657ja2u5+NugbWz/QOVeVfrM1HTRDcfQ= +github.com/libp2p/go-libp2p-kbucket v0.2.2 h1:Jg/JUbQix6mvTnj+86FasRqkh01JFQNrN+H26Gxxsg0= +github.com/libp2p/go-libp2p-kbucket v0.2.2/go.mod h1:opWrBZSWnBYPc315q497huxY3sz1t488X6OiXUEYWKA= github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8= github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= github.com/libp2p/go-libp2p-mplex v0.2.1 h1:E1xaJBQnbSiTHGI1gaBKmKhu1TUKkErKJnE8iGvirYI= github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= -github.com/libp2p/go-libp2p-nat v0.0.4 h1:+KXK324yaY701On8a0aGjTnw8467kW3ExKcqW2wwmyw= -github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= +github.com/libp2p/go-libp2p-nat v0.0.5 h1:/mH8pXFVKleflDL1YwqMg27W9GD8kjEx7NY0P6eGc98= +github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= github.com/libp2p/go-libp2p-peerstore v0.1.3 h1:wMgajt1uM2tMiqf4M+4qWKVyyFc8SfA+84VV9glZq1M= github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod 
h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= -github.com/libp2p/go-libp2p-pubsub v0.1.1 h1:phDnQvO3H3hAgaEEQi6yt3LILqIYVXaw05bxzezrEwQ= -github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q= -github.com/libp2p/go-libp2p-record v0.1.1 h1:ZJK2bHXYUBqObHX+rHLSNrM3M8fmJUlUHrodDPPATmY= -github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= +github.com/libp2p/go-libp2p-peerstore v0.1.4 h1:d23fvq5oYMJ/lkkbO4oTwBp/JP+I/1m5gZJobNXCE/k= +github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs= +github.com/libp2p/go-libp2p-pubsub v0.2.5 h1:tPKbkjAUI0xLGN3KKTKKy9TQEviVfrP++zJgH5Muke4= +github.com/libp2p/go-libp2p-pubsub v0.2.5/go.mod h1:9Q2RRq8ofXkoewORcyVlgUFDKLKw7BuYSlJVWRcVk3Y= +github.com/libp2p/go-libp2p-record v0.1.2 h1:M50VKzWnmUrk/M5/Dz99qO9Xh4vs8ijsK+7HkJvRP+0= +github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk= github.com/libp2p/go-libp2p-routing v0.1.0 h1:hFnj3WR3E2tOcKaGpyzfP4gvFZ3t8JkQmbapN0Ct+oU= github.com/libp2p/go-libp2p-routing v0.1.0/go.mod h1:zfLhI1RI8RLEzmEaaPwzonRvXeeSHddONWkcTcB54nE= github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= github.com/libp2p/go-libp2p-secio v0.2.0 h1:ywzZBsWEEz2KNTn5RtzauEDq5RFEefPsttXYwAWqHng= github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= +github.com/libp2p/go-libp2p-secio v0.2.1 h1:eNWbJTdyPA7NxhP7J3c5lT97DC5d+u+IldkgCYFTPVA= +github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= -github.com/libp2p/go-libp2p-swarm v0.2.1 h1:9A8oQqPIZvbaRyrjViHeDYS7fE7fNtP7BRWdJrBHbe8= -github.com/libp2p/go-libp2p-swarm v0.2.1/go.mod h1:x07b4zkMFo2EvgPV2bMTlNmdQc8i+74Jjio7xGvsTgU= +github.com/libp2p/go-libp2p-swarm v0.2.2 h1:T4hUpgEs2r371PweU3DuH7EOmBIdTBCwWs+FLcgx3bQ= +github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.1.0 h1:WaFRj/t3HdMZGNZqnU2pS7pDRBmMeoDx7/HDNpeyT9U= github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= +github.com/libp2p/go-libp2p-testing v0.1.1 h1:U03z3HnGI7Ni8Xx6ONVZvUFOAzWYmolWf5W5jAOPNmU= +github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-transport-upgrader v0.1.1 h1:PZMS9lhjK9VytzMCW3tWHAXtKXmlURSc3ZdvwEcKCzw= github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= @@ -292,10 +319,13 @@ github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6 github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.4 h1:agEFehY3zWJFUHK6SEMR7UYmk2z6kC3oeCM7ybLhguA= github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-nat v0.0.3 h1:l6fKV+p0Xa354EqQOQP+d8CivdLM4kl5GxC1hSc/UeI= 
-github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= +github.com/libp2p/go-nat v0.0.4 h1:KbizNnq8YIf7+Hn7+VFL/xE0eDrkPru2zIO9NMwL8UQ= +github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= github.com/libp2p/go-openssl v0.0.2 h1:9pP2d3Ubaxkv7ZisLjx9BFwgOGnQdQYnfcH29HNY3ls= github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= +github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.4 h1:d27YZvLoTyMhIN4njrkr8zMDOM4lfpHIp6A+TK9fovg= +github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-reuseport v0.0.1 h1:7PhkfH73VXfPJYKQ6JwS5I/eVcoyYi9IMNGc6FWpFLw= github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= github.com/libp2p/go-reuseport-transport v0.0.2 h1:WglMwyXyBu61CMkjCCtnmqNqnjib0GIEjMiHTwR/KN4= @@ -305,8 +335,11 @@ github.com/libp2p/go-stream-muxer-multistream v0.2.0 h1:714bRJ4Zy9mdhyTLJ+ZKiROm github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= github.com/libp2p/go-tcp-transport v0.1.0 h1:IGhowvEqyMFknOar4FWCKSWE0zL36UFKQtiRQD60/8o= github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= -github.com/libp2p/go-ws-transport v0.1.0 h1:F+0OvvdmPTDsVc4AjPHjV7L7Pk1B7D5QwtDcKE2oag4= -github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= +github.com/libp2p/go-tcp-transport v0.1.1 h1:yGlqURmqgNA2fvzjSgZNlHcsd/IulAnKM8Ncu+vlqnw= +github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= +github.com/libp2p/go-ws-transport v0.1.2/go.mod h1:dsh2Ld8F+XNmzpkaAijmg5Is+e9l6/1tK/6VFOdN69Y= +github.com/libp2p/go-ws-transport v0.2.0 h1:MJCw2OrPA9+76YNRvdo1wMnSOxb9Bivj6sVFY1Xrj6w= +github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.2.3 h1:xX8A36vpXb59frIzWFdEgptLMsOANMFq2K7fPRlunYI= github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= @@ -320,7 +353,6 @@ github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-runewidth v0.0.2 h1:UnlwIPBGaTZfPQ6T1IGzPI0EkYAQmT9fAEJ/poFC63o= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= @@ -330,6 +362,9 @@ github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+ github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.0 h1:U41/2erhAKcmSI14xh/ZTUdBPOzDOIfS93ibzUSl8KM= github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod 
h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= +github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= @@ -345,27 +380,44 @@ github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVq github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.0.4 h1:WgMSI84/eRLdbptXMkMWDXPjPq7SPLIgGUVm2eroyU4= github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.2.0 h1:lR52sFwcTCuQb6bTfnXF6zA2XfyYvyd+5a9qECv/J90= +github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= -github.com/multiformats/go-multiaddr-dns v0.0.3 h1:P19q/k9jwmtgh+qXFkKfgFM7rCg/9l5AVqh7VNxSXhs= -github.com/multiformats/go-multiaddr-dns v0.0.3/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.2.0 h1:YWJoIDwLePniH7OU5hBnDZV6SWuvJqJ0YtN6pLeH9zA= +github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= github.com/multiformats/go-multiaddr-fmt v0.0.1 h1:5YjeOIzbX8OTKVaN72aOzGIYW7PnrZrnkDyOfAWRSMA= github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= +github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= github.com/multiformats/go-multiaddr-net v0.0.1 h1:76O59E3FavvHqNg7jvzWzsPSW5JSi/ek0E4eiDVbg9g= github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= +github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= +github.com/multiformats/go-multiaddr-net v0.1.1 h1:jFFKUuXTXv+3ARyHZi3XUqQO+YWMKgBdhEvuGRfnL6s= +github.com/multiformats/go-multiaddr-net v0.1.1/go.mod 
h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.5 h1:1wxmCvTXAifAepIMyF39vZinRw5sbqjPs/UIi93+uik= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.10 h1:lMoNbh2Ssd9PUF74Nz008KGzGPlfeV6wH3rit5IIGCM= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multistream v0.1.0 h1:UpO6jrsjqs46mqAK3n6wKRYFhugss9ArzbyUzU+4wkQ= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/multiformats/go-varint v0.0.1 h1:TR/0rdQtnNxuN2IhiB639xC3tWM4IUi7DkTBVTdGW/M= +github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d h1:x3S6kxmy49zXVVyhcnrFqxvNVCBPb2KZ9hV2RBdS840= github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ= @@ -388,15 +440,13 @@ github.com/pkg/profile v1.3.0/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6J github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= @@ -406,8 +456,10 @@ github.com/shirou/gopsutil v0.0.0-20190731134726-d80c43f9c984 h1:wsZAb4P8F7uQSws github.com/shirou/gopsutil 
v0.0.0-20190731134726-d80c43f9c984/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 h1:udFKJ0aHUL60LboW/A+DfgoHVedieIzIXE8uylPue0U= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= @@ -437,17 +489,15 @@ github.com/ugorji/go/codec v0.0.0-20181209151446-772ced7fd4c2 h1:EICbibRW4JNKMcY github.com/ugorji/go/codec v0.0.0-20181209151446-772ced7fd4c2/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4= -github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc h1:9lDbC6Rz4bwmou+oE6Dt4Cb2BGMur5eR/GYptkKUVHo= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= -github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f h1:M/lL30eFZTKnomXY6huvM6G0+gVquFNf6mxghaWlFUg= -github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= +github.com/whyrusleeping/go-logging v0.0.1 h1:fwpzlmT0kRC/Fmd0MdmGgJG/CXIZ6gFq46FQZjprUcc= +github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= github.com/whyrusleeping/mafmt v1.2.8 h1:TCghSl5kkwEE0j+sU/gudyhVMRlpBin8fMBBHg59EbA= github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= -github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= @@ -466,6 +516,8 @@ go.dedis.ch/protobuf v1.0.7/go.mod h1:pv5ysfkDX/EawiPqcW3ikOxsL5t+BqnV6xHSmE79KI go.opencensus.io v0.21.0/go.mod 
h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.1 h1:8dP3SGL7MPB94crU3bEPplMPe83FI4EouesJUeFHv50= go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= +go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -475,9 +527,12 @@ golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 h1:Gv7RPwsi3eZ2Fgewe3CBsuOebPwO27PoXzRpJPsvSSM= golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -492,6 +547,7 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -513,10 +569,11 @@ golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190825160603-fb81701db80f h1:LCxigP8q3fPRGNVYndYsyHnF0zRrvcoVwZMfb8iQZe4= -golang.org/x/sys v0.0.0-20190825160603-fb81701db80f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69 h1:rOhMmluY6kLMhdnrivzec6lLgaVbMHMn2ISQXJeJ5EM= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -529,6 +586,8 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -537,6 +596,7 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= diff --git a/hashing/blake2b/blake2b.go b/hashing/blake2b/blake2b.go index f220b355dc5..c85ad02488f 100644 --- a/hashing/blake2b/blake2b.go +++ b/hashing/blake2b/blake2b.go @@ -24,7 +24,7 @@ func (b2b Blake2b) Compute(s string) []byte { } else { h, _ = blake2b.New(b2b.HashSize, nil) } - h.Write([]byte(s)) + _, _ = h.Write([]byte(s)) return h.Sum(nil) } @@ -47,8 +47,5 @@ func (b2b Blake2b) Size() int { // IsInterfaceNil returns true if there is no value under the interface func (b2b Blake2b) IsInterfaceNil() bool { - if &b2b == nil { - return true - } return false } diff --git a/hashing/fnv/fnv.go b/hashing/fnv/fnv.go index 6613348cfbc..031a8872d14 100644 --- a/hashing/fnv/fnv.go +++ b/hashing/fnv/fnv.go @@ -16,7 +16,7 @@ func (f Fnv) Compute(s string) []byte { return f.EmptyHash() } h := fnv.New128a() - h.Write([]byte(s)) + _, _ = h.Write([]byte(s)) return h.Sum(nil) } @@ -35,8 +35,5 @@ func (Fnv) Size() int { // IsInterfaceNil returns true if there is no value under the interface func (f Fnv) IsInterfaceNil() bool 
{ - if &f == nil { - return true - } return false } diff --git a/hashing/keccak/keccak.go b/hashing/keccak/keccak.go index 5ae7acafcef..cb72310ab38 100644 --- a/hashing/keccak/keccak.go +++ b/hashing/keccak/keccak.go @@ -16,7 +16,7 @@ func (k Keccak) Compute(s string) []byte { return k.EmptyHash() } h := sha3.NewLegacyKeccak256() - h.Write([]byte(s)) + _, _ = h.Write([]byte(s)) return h.Sum(nil) } diff --git a/hashing/sha256/sha256.go b/hashing/sha256/sha256.go index 525e9188d14..32b8daa4d5f 100644 --- a/hashing/sha256/sha256.go +++ b/hashing/sha256/sha256.go @@ -16,7 +16,7 @@ func (sha Sha256) Compute(s string) []byte { return sha.EmptyHash() } h := sha256.New() - h.Write([]byte(s)) + _, _ = h.Write([]byte(s)) return h.Sum(nil) } @@ -35,8 +35,5 @@ func (Sha256) Size() int { // IsInterfaceNil returns true if there is no value under the interface func (sha Sha256) IsInterfaceNil() bool { - if &sha == nil { - return true - } return false } diff --git a/integrationTests/consensus/consensus_test.go b/integrationTests/consensus/consensus_test.go index 32a094a26c3..bb113195a64 100644 --- a/integrationTests/consensus/consensus_test.go +++ b/integrationTests/consensus/consensus_test.go @@ -75,8 +75,8 @@ func initNodesAndTest( nodes[0][i].blkProcessor.ApplyBodyToHeaderCalled = func( header data.HeaderHandler, body data.BodyHandler, - ) error { - return process.ErrAccountStateDirty + ) (data.BodyHandler, error) { + return nil, process.ErrAccountStateDirty } nodes[0][i].blkProcessor.CreateBlockCalled = func( header data.HeaderHandler, diff --git a/integrationTests/consensus/testInitializer.go b/integrationTests/consensus/testInitializer.go index 18607b8335e..158f0e94a6b 100644 --- a/integrationTests/consensus/testInitializer.go +++ b/integrationTests/consensus/testInitializer.go @@ -5,29 +5,33 @@ import ( "crypto/ecdsa" "encoding/hex" "fmt" + "io/ioutil" + "math/big" "math/rand" "strconv" "strings" "sync" "time" - "math/big" - + "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/consensus/round" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/crypto/signing" "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool/headersCache" dataBlock "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/blockchain" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" "github.com/ElrondNetwork/elrond-go/data/trie" - "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" + "github.com/ElrondNetwork/elrond-go/data/trie/evictionWaitingList" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool" + "github.com/ElrondNetwork/elrond-go/epochStart/metachain" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/hashing/blake2b" "github.com/ElrondNetwork/elrond-go/hashing/sha256" @@ -176,9 +180,8 @@ func createTestBlockChain() data.ChainHandler { func createMemUnit() storage.Storer { cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) - persist, _ := memorydb.New() - unit, _ := storageUnit.NewStorageUnit(cache, persist) + unit, _ := storageUnit.NewStorageUnit(cache, 
memorydb.New()) return unit } @@ -194,24 +197,20 @@ func createTestStore() dataRetriever.StorageService { } func createTestShardDataPool() dataRetriever.PoolsHolder { - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + txPool, _ := txpool.NewShardedTxPool(storageUnit.CacheConfig{Size: 100000, Shards: 1}) uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) - cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} - hdrNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - hdrNonces, _ := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) + hdrPool, _ := headersCache.NewHeadersPool(config.HeadersPoolConfig{MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 100}) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + cacherCfg := storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} txBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} peerChangeBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} - metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + cacherCfg = storageUnit.CacheConfig{Size: 50000, Type: storageUnit.LRUCache} + trieNodes, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) currTxs, _ := dataPool.NewCurrentBlockPool() @@ -220,10 +219,9 @@ func createTestShardDataPool() dataRetriever.PoolsHolder { uTxPool, rewardsTxPool, hdrPool, - hdrNonces, txBlockBody, peerChangeBlockBody, - metaBlocks, + trieNodes, currTxs, ) @@ -234,8 +232,21 @@ func createAccountsDB(marshalizer marshal.Marshalizer) state.AccountsAdapter { marsh := &marshal.JsonMarshalizer{} hasher := sha256.Sha256{} store := createMemUnit() + evictionWaitListSize := uint(100) + ewl, _ := evictionWaitingList.NewEvictionWaitingList(evictionWaitListSize, memorydb.New(), marsh) + + // TODO change this implementation with a factory + tempDir, _ := ioutil.TempDir("", "integrationTests") + cfg := &config.DBConfig{ + FilePath: tempDir, + Type: string(storageUnit.LvlDbSerial), + BatchDelaySeconds: 4, + MaxBatchSize: 10000, + MaxOpenFiles: 10, + } + trieStorage, _ := trie.NewTrieStorageManager(store, cfg, ewl) - tr, _ := trie.NewTrie(store, marsh, hasher) + tr, _ := trie.NewTrie(trieStorage, marsh, hasher) adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, marshalizer, &mock.AccountsFactoryStub{ CreateAccountCalled: func(address state.AddressContainer, tracker state.AccountTracker) (wrapper state.AccountHandler, e error) { return state.NewAccount(address, tracker) @@ -320,8 +331,8 @@ func createConsensusOnlyNode( CreateBlockCalled: func(header data.HeaderHandler, haveTime func() bool) (handler data.BodyHandler, e error) { return &dataBlock.Body{}, nil }, - ApplyBodyToHeaderCalled: func(header data.HeaderHandler, body data.BodyHandler) error { - return nil + ApplyBodyToHeaderCalled: func(header data.HeaderHandler, body 
data.BodyHandler) (data.BodyHandler, error) { + return body, nil }, MarshalizedDataToBroadcastCalled: func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) { mrsData := make(map[uint32][]byte) @@ -370,19 +381,40 @@ func createConsensusOnlyNode( time.Millisecond*time.Duration(roundTime), syncer) - forkDetector, _ := syncFork.NewShardForkDetector(rounder, timecache.NewTimeCache(time.Second), 0) + argsNewMetaEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ + GenesisTime: time.Unix(startTime, 0), + EpochStartNotifier: &mock.EpochStartNotifierStub{}, + Settings: &config.EpochStartConfig{ + MinRoundsBetweenEpochs: 1, + RoundsPerEpoch: 3, + }, + Epoch: 0, + Storage: createTestStore(), + Marshalizer: testMarshalizer, + } + epochStartTrigger, _ := metachain.NewEpochStartTrigger(argsNewMetaEpochStart) + + forkDetector, _ := syncFork.NewShardForkDetector( + rounder, + timecache.NewTimeCache(time.Second), + &mock.BlockTrackerStub{}, + 0, + ) hdrResolver := &mock.HeaderResolverMock{} mbResolver := &mock.MiniBlocksResolverMock{} resolverFinder := &mock.ResolversFinderStub{ IntraShardResolverCalled: func(baseTopic string) (resolver dataRetriever.Resolver, e error) { - if baseTopic == factory.HeadersTopic { - return hdrResolver, nil - } if baseTopic == factory.MiniBlocksTopic { return mbResolver, nil } - return hdrResolver, nil + return nil, nil + }, + CrossShardResolverCalled: func(baseTopic string, crossShard uint32) (resolver dataRetriever.Resolver, err error) { + if baseTopic == factory.ShardBlocksTopic { + return hdrResolver, nil + } + return nil, nil }, } @@ -408,7 +440,7 @@ func createConsensusOnlyNode( node.WithPrivKey(privKey), node.WithForkDetector(forkDetector), node.WithMessenger(messenger), - node.WithMarshalizer(testMarshalizer), + node.WithMarshalizer(testMarshalizer, 0), node.WithHasher(testHasher), node.WithAddressConverter(testAddressConverter), node.WithAccountsAdapter(accntAdapter), @@ -426,6 +458,7 @@ func createConsensusOnlyNode( node.WithResolversFinder(resolverFinder), node.WithConsensusType(consensusType), node.WithBlackListHandler(&mock.BlackListHandlerStub{}), + node.WithEpochStartTrigger(epochStartTrigger), node.WithBootStorer(&mock.BoostrapStorerMock{}), node.WithRequestedItemsHandler(&mock.RequestedItemsHandlerStub{}), node.WithHeaderSigVerifier(&mock.HeaderSigVerifierStub{}), diff --git a/integrationTests/frontend/wallet/txInterception_test.go b/integrationTests/frontend/wallet/txInterception_test.go index b8cfe9fba2e..e3aa5ed0109 100644 --- a/integrationTests/frontend/wallet/txInterception_test.go +++ b/integrationTests/frontend/wallet/txInterception_test.go @@ -12,9 +12,11 @@ import ( "github.com/stretchr/testify/assert" ) +const mintingValue = "100000000" + func TestInterceptedTxFromFrontendLargeValue(t *testing.T) { value := big.NewInt(0) - value.SetString("1000999999999999999999991234", 10) + value.SetString("777", 10) fmt.Println(value.Text(10)) fmt.Println(value.Text(16)) @@ -23,12 +25,12 @@ func TestInterceptedTxFromFrontendLargeValue(t *testing.T) { t, 0, value, - "c2981474860ebd42f9da812a41dcace8a0c2fdac52e3a66a45603821ca4c6d43", - "c2981474860ebd42f9da812a41dcace8a0c2fdac52e3a66a45603821ca4c6d43", - "469d44b058faadb56cabbc696f2a0f5c9d4a361b3432c37135d6216feb03fcce890ebc3b98d1506be0cf88f5f22ad533a90386b2211aaad6df32a41be4b01e09", + "53669be65aac358a6add8e8a8b1251bb994dc1e4a0cc885956f5ecd53396f0d8", + "2d7aa683fbb37eafc2426bfe63e1c20aa5872ee4627c51b6789f41bfb8d31fdb", + 
"a18a6c6647d10a579acd7e39258f38cee4cd36998ae12edf4e884066231b00e18d792cc14ece72d3ac6fb26281c5419b1ec9736291d1c9fbb312ee2a730c8103", 10, - 1002, - "de", + 100000, + []byte("a@b@c!!$%^<>#!"), ) } @@ -43,7 +45,7 @@ func testInterceptedTxFromFrontendGeneratedParams( frontendSignature string, frontendGasPrice uint64, frontendGasLimit uint64, - frontendData string, + frontendData []byte, ) { if testing.Short() { t.Skip("this is not a short test") @@ -55,8 +57,8 @@ func testInterceptedTxFromFrontendGeneratedParams( nodeShardId := uint32(0) txSignPrivKeyShardId := uint32(0) initialNodeAddr := "nodeAddr" - valMinting := big.NewInt(0).Set(frontendValue) - valMinting.Mul(valMinting, big.NewInt(2)) + valMinting, _ := big.NewInt(0).SetString(mintingValue, 10) + valMinting.Mul(valMinting, big.NewInt(5)) node := integrationTests.NewTestProcessorNode( maxShards, diff --git a/integrationTests/interface.go b/integrationTests/interface.go index dddf184a950..15aafc78ad7 100644 --- a/integrationTests/interface.go +++ b/integrationTests/interface.go @@ -1,6 +1,9 @@ package integrationTests -import "github.com/ElrondNetwork/elrond-go/process" +import ( + "github.com/ElrondNetwork/elrond-go/epochStart" + "github.com/ElrondNetwork/elrond-go/process" +) // TestBootstrapper extends the Bootstrapper interface with some functions intended to be used only in tests // as it simplifies the reproduction of edge cases @@ -9,3 +12,12 @@ type TestBootstrapper interface { RollBack(revertUsingForkNonce bool) error SetProbableHighestNonce(nonce uint64) } + +// TestEpochStartTrigger extends the epochStart trigger interface with some functions intended to by used only +// in tests as it simplifies the reproduction of test scenarios +type TestEpochStartTrigger interface { + epochStart.TriggerHandler + GetRoundsPerEpoch() uint64 + SetTrigger(triggerHandler epochStart.TriggerHandler) + SetRoundsPerEpoch(roundsPerEpoch uint64) +} diff --git a/integrationTests/longTests/executingSCTransactions_test.go b/integrationTests/longTests/executingSCTransactions_test.go index 62875dbc901..68f79feb55c 100644 --- a/integrationTests/longTests/executingSCTransactions_test.go +++ b/integrationTests/longTests/executingSCTransactions_test.go @@ -9,9 +9,12 @@ import ( "testing" "time" + "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/core/statistics" "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage/mock" "github.com/pkg/profile" "github.com/stretchr/testify/assert" ) @@ -74,7 +77,7 @@ func TestProcessesJoinGameTheSamePlayerMultipleTimesRewardAndEndgameInMultipleRo integrationTests.MintAllNodes(nodes, initialVal) integrationTests.MintAllPlayers(nodes, players, initialVal) - integrationTests.DeployScTx(nodes, idxProposer, string(scCode)) + integrationTests.DeployScTx(nodes, idxProposer, string(scCode), factory.IELEVirtualMachine) time.Sleep(stepDelay) integrationTests.ProposeBlock(nodes, []int{idxProposer}, round, nonce) integrationTests.SyncBlock(t, nodes, []int{idxProposer}, round) @@ -155,7 +158,7 @@ func TestProcessesJoinGame100PlayersMultipleTimesRewardAndEndgameInMultipleRound integrationTests.MintAllNodes(nodes, initialVal) integrationTests.MintAllPlayers(nodes, players, initialVal) - integrationTests.DeployScTx(nodes, idxProposer, string(scCode)) + integrationTests.DeployScTx(nodes, idxProposer, string(scCode), factory.IELEVirtualMachine) time.Sleep(stepDelay) 
integrationTests.ProposeBlock(nodes, []int{idxProposer}, round, nonce) integrationTests.SyncBlock(t, nodes, []int{idxProposer}, round) @@ -246,7 +249,7 @@ func TestProcessesJoinGame100PlayersMultipleTimesRewardAndEndgameInMultipleRound nrRoundsToPropagateMultiShard = 1 } - integrationTests.DeployScTx(nodes, idxProposer, string(scCode)) + integrationTests.DeployScTx(nodes, idxProposer, string(scCode), factory.IELEVirtualMachine) time.Sleep(stepDelay) for i := 0; i < nrRoundsToPropagateMultiShard; i++ { integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) @@ -342,7 +345,7 @@ func TestProcessesJoinGame100PlayersMultipleTimesRewardAndEndgameInMultipleRound idxProposers[1] = 2 idxProposers[2] = 4 - integrationTests.DeployScTx(nodes, idxProposer, string(scCode)) + integrationTests.DeployScTx(nodes, idxProposer, string(scCode), factory.IELEVirtualMachine) time.Sleep(stepDelay) for i := 0; i < nrRoundsToPropagateMultiShard; i++ { integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) @@ -432,7 +435,7 @@ func runMultipleRoundsOfTheGame( round, nonce = integrationTests.ProposeAndSyncBlocks(t, nodes, idxProposers, round, nonce) - fmt.Println(rMonitor.GenerateStatistics()) + fmt.Println(rMonitor.GenerateStatistics(&config.Config{AccountsTrieStorage: config.StorageConfig{DB: config.DBConfig{}}}, &mock.PathManagerStub{}, "")) } integrationTests.CheckRewardsDistribution(t, nodes, players, topUpValue, totalWithdrawValue, diff --git a/integrationTests/mock/DbMock.go b/integrationTests/mock/DbMock.go index 3919c73317c..33e1e543971 100644 --- a/integrationTests/mock/DbMock.go +++ b/integrationTests/mock/DbMock.go @@ -31,6 +31,10 @@ func (MockDB) Destroy() error { return nil } +func (MockDB) DestroyClosed() error { + return nil +} + // IsInterfaceNil returns true if there is no value under the interface func (s MockDB) IsInterfaceNil() bool { if &s == nil { diff --git a/integrationTests/mock/blockProcessorMock.go b/integrationTests/mock/blockProcessorMock.go index 7fbd9af62a2..7060a8ad2f6 100644 --- a/integrationTests/mock/blockProcessorMock.go +++ b/integrationTests/mock/blockProcessorMock.go @@ -6,6 +6,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" ) // BlockProcessorMock mocks the implementation for a blockProcessor @@ -17,7 +18,7 @@ type BlockProcessorMock struct { RevertAccountStateCalled func() CreateBlockCalled func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.BodyHandler, error) RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error - ApplyBodyToHeaderCalled func(header data.HeaderHandler, body data.BodyHandler) error + ApplyBodyToHeaderCalled func(header data.HeaderHandler, body data.BodyHandler) (data.BodyHandler, error) MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) DecodeBlockBodyCalled func(dta []byte) data.BodyHandler DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler @@ -39,7 +40,7 @@ func (blProcMock *BlockProcessorMock) ProcessBlock(blockChain data.ChainHandler, return blProcMock.ProcessBlockCalled(blockChain, header, body, haveTime) } -func (blProcMock *BlockProcessorMock) ApplyProcessedMiniBlocks(miniBlocks map[string]map[string]struct{}) { +func (blProcMock *BlockProcessorMock) ApplyProcessedMiniBlocks(miniBlocks *processedMb.ProcessedMiniBlockTracker) { } 
@@ -66,7 +67,7 @@ func (blProcMock *BlockProcessorMock) RestoreBlockIntoPools(header data.HeaderHa return blProcMock.RestoreBlockIntoPoolsCalled(header, body) } -func (blProcMock BlockProcessorMock) ApplyBodyToHeader(header data.HeaderHandler, body data.BodyHandler) error { +func (blProcMock BlockProcessorMock) ApplyBodyToHeader(header data.HeaderHandler, body data.BodyHandler) (data.BodyHandler, error) { return blProcMock.ApplyBodyToHeaderCalled(header, body) } diff --git a/integrationTests/mock/blockTrackerStub.go b/integrationTests/mock/blockTrackerStub.go new file mode 100644 index 00000000000..4003fca8807 --- /dev/null +++ b/integrationTests/mock/blockTrackerStub.go @@ -0,0 +1,160 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" +) + +type BlockTrackerStub struct { + AddTrackedHeaderCalled func(header data.HeaderHandler, hash []byte) + AddCrossNotarizedHeaderCalled func(shardID uint32, crossNotarizedHeader data.HeaderHandler, crossNotarizedHeaderHash []byte) + AddSelfNotarizedHeaderCalled func(shardID uint32, selfNotarizedHeader data.HeaderHandler, selfNotarizedHeaderHash []byte) + CleanupHeadersBehindNonceCalled func(shardID uint32, selfNotarizedNonce uint64, crossNotarizedNonce uint64) + ComputeLongestChainCalled func(shardID uint32, header data.HeaderHandler) ([]data.HeaderHandler, [][]byte) + ComputeLongestMetaChainFromLastNotarizedCalled func() ([]data.HeaderHandler, [][]byte, error) + ComputeLongestShardsChainsFromLastNotarizedCalled func() ([]data.HeaderHandler, [][]byte, map[uint32][]data.HeaderHandler, error) + DisplayTrackedHeadersCalled func() + GetCrossNotarizedHeaderCalled func(shardID uint32, offset uint64) (data.HeaderHandler, []byte, error) + GetLastCrossNotarizedHeaderCalled func(shardID uint32) (data.HeaderHandler, []byte, error) + GetLastCrossNotarizedHeadersForAllShardsCalled func() (map[uint32]data.HeaderHandler, error) + GetTrackedHeadersCalled func(shardID uint32) ([]data.HeaderHandler, [][]byte) + GetTrackedHeadersForAllShardsCalled func() map[uint32][]data.HeaderHandler + GetTrackedHeadersWithNonceCalled func(shardID uint32, nonce uint64) ([]data.HeaderHandler, [][]byte) + IsShardStuckCalled func(shardId uint32) bool + RegisterCrossNotarizedHeadersHandlerCalled func(handler func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte)) + RegisterSelfNotarizedHeadersHandlerCalled func(handler func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte)) + RemoveLastNotarizedHeadersCalled func() + RestoreToGenesisCalled func() +} + +func (bts *BlockTrackerStub) AddTrackedHeader(header data.HeaderHandler, hash []byte) { + if bts.AddTrackedHeaderCalled != nil { + bts.AddTrackedHeaderCalled(header, hash) + } +} + +func (bts *BlockTrackerStub) AddCrossNotarizedHeader(shardID uint32, crossNotarizedHeader data.HeaderHandler, crossNotarizedHeaderHash []byte) { + if bts.AddCrossNotarizedHeaderCalled != nil { + bts.AddCrossNotarizedHeaderCalled(shardID, crossNotarizedHeader, crossNotarizedHeaderHash) + } +} + +func (bts *BlockTrackerStub) AddSelfNotarizedHeader(shardID uint32, selfNotarizedHeader data.HeaderHandler, selfNotarizedHeaderHash []byte) { + if bts.AddSelfNotarizedHeaderCalled != nil { + bts.AddSelfNotarizedHeaderCalled(shardID, selfNotarizedHeader, selfNotarizedHeaderHash) + } +} + +func (bts *BlockTrackerStub) CleanupHeadersBehindNonce(shardID uint32, selfNotarizedNonce uint64, crossNotarizedNonce uint64) { + if bts.CleanupHeadersBehindNonceCalled != nil { + bts.CleanupHeadersBehindNonceCalled(shardID, 
selfNotarizedNonce, crossNotarizedNonce) + } +} + +func (bts *BlockTrackerStub) ComputeLongestChain(shardID uint32, header data.HeaderHandler) ([]data.HeaderHandler, [][]byte) { + if bts.ComputeLongestChainCalled != nil { + return bts.ComputeLongestChainCalled(shardID, header) + } + return nil, nil +} + +func (bts *BlockTrackerStub) ComputeLongestMetaChainFromLastNotarized() ([]data.HeaderHandler, [][]byte, error) { + if bts.ComputeLongestMetaChainFromLastNotarizedCalled != nil { + return bts.ComputeLongestMetaChainFromLastNotarizedCalled() + } + + return nil, nil, nil +} + +func (bts *BlockTrackerStub) ComputeLongestShardsChainsFromLastNotarized() ([]data.HeaderHandler, [][]byte, map[uint32][]data.HeaderHandler, error) { + if bts.ComputeLongestShardsChainsFromLastNotarizedCalled != nil { + return bts.ComputeLongestShardsChainsFromLastNotarizedCalled() + } + + return nil, nil, nil, nil +} + +func (bts *BlockTrackerStub) DisplayTrackedHeaders() { + if bts.DisplayTrackedHeadersCalled != nil { + bts.DisplayTrackedHeadersCalled() + } +} + +func (bts *BlockTrackerStub) GetCrossNotarizedHeader(shardID uint32, offset uint64) (data.HeaderHandler, []byte, error) { + if bts.GetCrossNotarizedHeaderCalled != nil { + return bts.GetCrossNotarizedHeaderCalled(shardID, offset) + } + + return nil, nil, nil +} + +func (bts *BlockTrackerStub) GetLastCrossNotarizedHeader(shardID uint32) (data.HeaderHandler, []byte, error) { + if bts.GetLastCrossNotarizedHeaderCalled != nil { + return bts.GetLastCrossNotarizedHeaderCalled(shardID) + } + + return nil, nil, nil +} + +func (bts *BlockTrackerStub) GetLastCrossNotarizedHeadersForAllShards() (map[uint32]data.HeaderHandler, error) { + if bts.GetLastCrossNotarizedHeadersForAllShardsCalled != nil { + return bts.GetLastCrossNotarizedHeadersForAllShardsCalled() + } + + return nil, nil +} + +func (bts *BlockTrackerStub) GetTrackedHeaders(shardID uint32) ([]data.HeaderHandler, [][]byte) { + if bts.GetTrackedHeadersCalled != nil { + return bts.GetTrackedHeadersCalled(shardID) + } + + return nil, nil +} + +func (bts *BlockTrackerStub) GetTrackedHeadersForAllShards() map[uint32][]data.HeaderHandler { + if bts.GetTrackedHeadersForAllShardsCalled != nil { + return bts.GetTrackedHeadersForAllShardsCalled() + } + + return nil +} + +func (bts *BlockTrackerStub) GetTrackedHeadersWithNonce(shardID uint32, nonce uint64) ([]data.HeaderHandler, [][]byte) { + if bts.GetTrackedHeadersWithNonceCalled != nil { + return bts.GetTrackedHeadersWithNonceCalled(shardID, nonce) + } + + return nil, nil +} + +func (bts *BlockTrackerStub) IsShardStuck(shardId uint32) bool { + return bts.IsShardStuckCalled(shardId) +} + +func (bts *BlockTrackerStub) RegisterCrossNotarizedHeadersHandler(handler func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte)) { + if bts.RegisterCrossNotarizedHeadersHandlerCalled != nil { + bts.RegisterCrossNotarizedHeadersHandlerCalled(handler) + } +} + +func (bts *BlockTrackerStub) RegisterSelfNotarizedHeadersHandler(handler func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte)) { + if bts.RegisterSelfNotarizedHeadersHandlerCalled != nil { + bts.RegisterSelfNotarizedHeadersHandlerCalled(handler) + } +} + +func (bts *BlockTrackerStub) RemoveLastNotarizedHeaders() { + if bts.RemoveLastNotarizedHeadersCalled != nil { + bts.RemoveLastNotarizedHeadersCalled() + } +} + +func (bts *BlockTrackerStub) RestoreToGenesis() { + if bts.RestoreToGenesisCalled != nil { + bts.RestoreToGenesisCalled() + } +} + +func (bts *BlockTrackerStub) 
IsInterfaceNil() bool { + return bts == nil +} diff --git a/integrationTests/mock/countingDB.go b/integrationTests/mock/countingDB.go index 9a7bf9ddfe3..44f200d14a8 100644 --- a/integrationTests/mock/countingDB.go +++ b/integrationTests/mock/countingDB.go @@ -10,12 +10,11 @@ type countingDB struct { } func NewCountingDB() *countingDB { - db, _ := memorydb.New() - return &countingDB{db, 0} + return &countingDB{memorydb.New(), 0} } func (cdb *countingDB) Put(key, val []byte) error { - cdb.db.Put(key, val) + _ = cdb.db.Put(key, val) cdb.nrOfPut++ return nil } @@ -44,6 +43,10 @@ func (cdb *countingDB) Destroy() error { return cdb.db.Destroy() } +func (cdb *countingDB) DestroyClosed() error { + return cdb.Destroy() +} + func (cdb *countingDB) Reset() { cdb.nrOfPut = 0 } diff --git a/integrationTests/mock/endOfEpochTriggerStub.go b/integrationTests/mock/endOfEpochTriggerStub.go new file mode 100644 index 00000000000..cb10ab5a83f --- /dev/null +++ b/integrationTests/mock/endOfEpochTriggerStub.go @@ -0,0 +1,96 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/epochStart" +) + +type EpochStartTriggerStub struct { + ForceEpochStartCalled func(round uint64) error + IsEpochStartCalled func() bool + EpochCalled func() uint32 + ReceivedHeaderCalled func(handler data.HeaderHandler) + UpdateCalled func(round uint64) + ProcessedCalled func(header data.HeaderHandler) + EpochStartRoundCalled func() uint64 +} + +func (e *EpochStartTriggerStub) SetCurrentEpochStartRound(_ uint64) { +} + +func (e *EpochStartTriggerStub) NotifyAll(_ data.HeaderHandler) { +} + +func (e *EpochStartTriggerStub) SetFinalityAttestingRound(_ uint64) { +} + +func (e *EpochStartTriggerStub) EpochFinalityAttestingRound() uint64 { + return 0 +} + +func (e *EpochStartTriggerStub) EpochStartMetaHdrHash() []byte { + return nil +} + +func (e *EpochStartTriggerStub) GetRoundsPerEpoch() uint64 { + return 0 +} + +func (e *EpochStartTriggerStub) SetTrigger(_ epochStart.TriggerHandler) { +} + +func (e *EpochStartTriggerStub) Revert() { +} + +func (e *EpochStartTriggerStub) EpochStartRound() uint64 { + if e.EpochStartRoundCalled != nil { + return e.EpochStartRoundCalled() + } + return 0 +} + +func (e *EpochStartTriggerStub) Update(round uint64) { + if e.UpdateCalled != nil { + e.UpdateCalled(round) + } +} + +func (e *EpochStartTriggerStub) SetProcessed(header data.HeaderHandler) { + if e.ProcessedCalled != nil { + e.ProcessedCalled(header) + } +} + +func (e *EpochStartTriggerStub) ForceEpochStart(round uint64) error { + if e.ForceEpochStartCalled != nil { + return e.ForceEpochStartCalled(round) + } + return nil +} + +func (e *EpochStartTriggerStub) IsEpochStart() bool { + if e.IsEpochStartCalled != nil { + return e.IsEpochStartCalled() + } + return false +} + +func (e *EpochStartTriggerStub) Epoch() uint32 { + if e.EpochCalled != nil { + return e.EpochCalled() + } + return 0 +} + +func (e *EpochStartTriggerStub) ReceivedHeader(header data.HeaderHandler) { + if e.ReceivedHeaderCalled != nil { + e.ReceivedHeaderCalled(header) + } +} + +func (e *EpochStartTriggerStub) SetRoundsPerEpoch(_ uint64) { +} + +func (e *EpochStartTriggerStub) IsInterfaceNil() bool { + return e == nil +} diff --git a/integrationTests/mock/epochStartNotifierStub.go b/integrationTests/mock/epochStartNotifierStub.go new file mode 100644 index 00000000000..ebc7cd23239 --- /dev/null +++ b/integrationTests/mock/epochStartNotifierStub.go @@ -0,0 +1,34 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" 
+ "github.com/ElrondNetwork/elrond-go/epochStart/notifier" +) + +type EpochStartNotifierStub struct { + RegisterHandlerCalled func(handler notifier.SubscribeFunctionHandler) + UnregisterHandlerCalled func(handler notifier.SubscribeFunctionHandler) + NotifyAllCalled func(hdr data.HeaderHandler) +} + +func (esnm *EpochStartNotifierStub) RegisterHandler(handler notifier.SubscribeFunctionHandler) { + if esnm.RegisterHandlerCalled != nil { + esnm.RegisterHandlerCalled(handler) + } +} + +func (esnm *EpochStartNotifierStub) UnregisterHandler(handler notifier.SubscribeFunctionHandler) { + if esnm.UnregisterHandlerCalled != nil { + esnm.UnregisterHandlerCalled(handler) + } +} + +func (esnm *EpochStartNotifierStub) NotifyAll(hdr data.HeaderHandler) { + if esnm.NotifyAllCalled != nil { + esnm.NotifyAllCalled(hdr) + } +} + +func (esnm *EpochStartNotifierStub) IsInterfaceNil() bool { + return esnm == nil +} diff --git a/integrationTests/mock/forkDetectorMock.go b/integrationTests/mock/forkDetectorMock.go index 9fe976330a4..4ef76bdaf94 100644 --- a/integrationTests/mock/forkDetectorMock.go +++ b/integrationTests/mock/forkDetectorMock.go @@ -7,41 +7,44 @@ import ( // ForkDetectorMock is a mock implementation for the ForkDetector interface type ForkDetectorMock struct { - AddHeaderCalled func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error - RemoveHeadersCalled func(nonce uint64, hash []byte) - CheckForkCalled func() *process.ForkInfo - GetHighestFinalBlockNonceCalled func() uint64 - ProbableHighestNonceCalled func() uint64 - ResetProbableHighestNonceCalled func() - ResetForkCalled func() - GetNotarizedHeaderHashCalled func(nonce uint64) []byte - RestoreFinalCheckPointToGenesisCalled func() + AddHeaderCalled func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error + RemoveHeaderCalled func(nonce uint64, hash []byte) + CheckForkCalled func() *process.ForkInfo + GetHighestFinalBlockNonceCalled func() uint64 + GetHighestFinalBlockHashCalled func() []byte + ProbableHighestNonceCalled func() uint64 + ResetForkCalled func() + GetNotarizedHeaderHashCalled func(nonce uint64) []byte + RestoreToGenesisCalled func() + SetRollBackNonceCalled func(nonce uint64) } -func (f *ForkDetectorMock) RestoreFinalCheckPointToGenesis() { - if f.RestoreFinalCheckPointToGenesisCalled != nil { - f.RestoreFinalCheckPointToGenesisCalled() - } +func (fdm *ForkDetectorMock) RestoreToGenesis() { + fdm.RestoreToGenesisCalled() } // AddHeader is a mock implementation for AddHeader -func (f *ForkDetectorMock) AddHeader(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error { - return f.AddHeaderCalled(header, hash, state, finalHeaders, finalHeadersHashes, isNotarizedShardStuck) +func (fdm *ForkDetectorMock) AddHeader(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { + return fdm.AddHeaderCalled(header, hash, state, selfNotarizedHeaders, selfNotarizedHeadersHashes) } -// RemoveHeaders is a mock implementation for RemoveHeaders -func (f *ForkDetectorMock) RemoveHeaders(nonce uint64, hash []byte) { - f.RemoveHeadersCalled(nonce, hash) +// RemoveHeader is a mock implementation for RemoveHeader +func 
(fdm *ForkDetectorMock) RemoveHeader(nonce uint64, hash []byte) { + fdm.RemoveHeaderCalled(nonce, hash) } // CheckFork is a mock implementation for CheckFork -func (f *ForkDetectorMock) CheckFork() *process.ForkInfo { - return f.CheckForkCalled() +func (fdm *ForkDetectorMock) CheckFork() *process.ForkInfo { + return fdm.CheckForkCalled() } // GetHighestFinalBlockNonce is a mock implementation for GetHighestFinalBlockNonce -func (f *ForkDetectorMock) GetHighestFinalBlockNonce() uint64 { - return f.GetHighestFinalBlockNonceCalled() +func (fdm *ForkDetectorMock) GetHighestFinalBlockNonce() uint64 { + return fdm.GetHighestFinalBlockNonceCalled() +} + +func (fdm *ForkDetectorMock) GetHighestFinalBlockHash() []byte { + return fdm.GetHighestFinalBlockHashCalled() } // GetProbableHighestNonce is a mock implementation for GetProbableHighestNonce @@ -49,8 +52,10 @@ func (f *ForkDetectorMock) ProbableHighestNonce() uint64 { return f.ProbableHighestNonceCalled() } -func (fdm *ForkDetectorMock) ResetProbableHighestNonce() { - fdm.ResetProbableHighestNonceCalled() +func (fdm *ForkDetectorMock) SetRollBackNonce(nonce uint64) { + if fdm.SetRollBackNonceCalled != nil { + fdm.SetRollBackNonceCalled(nonce) + } } func (fdm *ForkDetectorMock) ResetFork() { @@ -63,8 +68,5 @@ func (fdm *ForkDetectorMock) GetNotarizedHeaderHash(nonce uint64) []byte { // IsInterfaceNil returns true if there is no value under the interface func (fdm *ForkDetectorMock) IsInterfaceNil() bool { - if fdm == nil { - return true - } - return false + return fdm == nil } diff --git a/integrationTests/mock/hasherSpongeMock.go b/integrationTests/mock/hasherSpongeMock.go deleted file mode 100644 index 2a1c66b9318..00000000000 --- a/integrationTests/mock/hasherSpongeMock.go +++ /dev/null @@ -1,33 +0,0 @@ -package mock - -import ( - "golang.org/x/crypto/blake2b" -) - -var hasherSpongeEmptyHash []byte - -const hashSize = 16 - -// HasherSpongeMock that will be used for testing -type HasherSpongeMock struct { -} - -// Compute will output the SHA's equivalent of the input string -func (sha HasherSpongeMock) Compute(s string) []byte { - h, _ := blake2b.New(hashSize, nil) - h.Write([]byte(s)) - return h.Sum(nil) -} - -// EmptyHash will return the equivalent of empty string SHA's -func (sha HasherSpongeMock) EmptyHash() []byte { - if len(hasherSpongeEmptyHash) == 0 { - hasherSpongeEmptyHash = sha.Compute("") - } - return hasherSpongeEmptyHash -} - -// Size returns the required size in bytes -func (HasherSpongeMock) Size() int { - return hashSize -} diff --git a/integrationTests/mock/headerResolverMock.go b/integrationTests/mock/headerResolverMock.go index d811bad7004..4f1a5348bf6 100644 --- a/integrationTests/mock/headerResolverMock.go +++ b/integrationTests/mock/headerResolverMock.go @@ -1,6 +1,7 @@ package mock import ( + "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/p2p" ) @@ -8,6 +9,22 @@ type HeaderResolverMock struct { RequestDataFromHashCalled func(hash []byte) error ProcessReceivedMessageCalled func(message p2p.MessageP2P) error RequestDataFromNonceCalled func(nonce uint64) error + RequestDataFromEpochCalled func(identifier []byte) error + SetEpochHandlerCalled func(epochHandler dataRetriever.EpochHandler) error +} + +func (hrs *HeaderResolverMock) RequestDataFromEpoch(identifier []byte) error { + if hrs.RequestDataFromEpochCalled != nil { + return hrs.RequestDataFromEpochCalled(identifier) + } + return nil +} + +func (hrs *HeaderResolverMock) SetEpochHandler(epochHandler dataRetriever.EpochHandler) 
error { + if hrs.SetEpochHandlerCalled != nil { + return hrs.SetEpochHandlerCalled(epochHandler) + } + return nil } func (hrm *HeaderResolverMock) RequestDataFromHash(hash []byte) error { @@ -33,8 +50,5 @@ func (hrm *HeaderResolverMock) RequestDataFromNonce(nonce uint64) error { // IsInterfaceNil returns true if there is no value under the interface func (hrm *HeaderResolverMock) IsInterfaceNil() bool { - if hrm == nil { - return true - } - return false + return hrm == nil } diff --git a/integrationTests/mock/headersCacherStub.go b/integrationTests/mock/headersCacherStub.go new file mode 100644 index 00000000000..dd3c5acef04 --- /dev/null +++ b/integrationTests/mock/headersCacherStub.go @@ -0,0 +1,83 @@ +package mock + +import ( + "errors" + + "github.com/ElrondNetwork/elrond-go/data" +) + +type HeadersCacherStub struct { + AddCalled func(headerHash []byte, header data.HeaderHandler) + RemoveHeaderByHashCalled func(headerHash []byte) + RemoveHeaderByNonceAndShardIdCalled func(hdrNonce uint64, shardId uint32) + GetHeaderByNonceAndShardIdCalled func(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) + GetHeaderByHashCalled func(hash []byte) (data.HeaderHandler, error) + ClearCalled func() + RegisterHandlerCalled func(handler func(shardHeaderHash []byte)) + KeysCalled func(shardId uint32) []uint64 + LenCalled func() int + MaxSizeCalled func() int +} + +func (hcs *HeadersCacherStub) AddHeader(headerHash []byte, header data.HeaderHandler) { + if hcs.AddCalled != nil { + hcs.AddCalled(headerHash, header) + } +} + +func (hcs *HeadersCacherStub) RemoveHeaderByHash(headerHash []byte) { + if hcs.RemoveHeaderByHashCalled != nil { + hcs.RemoveHeaderByHashCalled(headerHash) + } +} + +func (hcs *HeadersCacherStub) RemoveHeaderByNonceAndShardId(hdrNonce uint64, shardId uint32) { + if hcs.RemoveHeaderByNonceAndShardIdCalled != nil { + hcs.RemoveHeaderByNonceAndShardIdCalled(hdrNonce, shardId) + } +} + +func (hcs *HeadersCacherStub) GetHeadersByNonceAndShardId(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) { + if hcs.GetHeaderByNonceAndShardIdCalled != nil { + return hcs.GetHeaderByNonceAndShardIdCalled(hdrNonce, shardId) + } + return nil, nil, errors.New("err") +} + +func (hcs *HeadersCacherStub) GetHeaderByHash(hash []byte) (data.HeaderHandler, error) { + if hcs.GetHeaderByHashCalled != nil { + return hcs.GetHeaderByHashCalled(hash) + } + return nil, nil +} + +func (hcs *HeadersCacherStub) Clear() { + if hcs.ClearCalled != nil { + hcs.ClearCalled() + } +} + +func (hcs *HeadersCacherStub) RegisterHandler(handler func(shardHeaderHash []byte)) { + if hcs.RegisterHandlerCalled != nil { + hcs.RegisterHandlerCalled(handler) + } +} + +func (hcs *HeadersCacherStub) Keys(shardId uint32) []uint64 { + if hcs.KeysCalled != nil { + return hcs.KeysCalled(shardId) + } + return nil +} + +func (hcs *HeadersCacherStub) Len() int { + return 0 +} + +func (hcs *HeadersCacherStub) MaxSize() int { + return 100 +} + +func (hcs *HeadersCacherStub) IsInterfaceNil() bool { + return hcs == nil +} diff --git a/integrationTests/mock/intermediateTransactionHandlerMock.go b/integrationTests/mock/intermediateTransactionHandlerMock.go index 252bf569fc9..efd76ced16b 100644 --- a/integrationTests/mock/intermediateTransactionHandlerMock.go +++ b/integrationTests/mock/intermediateTransactionHandlerMock.go @@ -15,6 +15,10 @@ type IntermediateTransactionHandlerMock struct { GetAllCurrentFinishedTxsCalled func() map[string]data.TransactionHandler } +func (ith *IntermediateTransactionHandlerMock) 
GetCreatedInShardMiniBlock() *block.MiniBlock { + return &block.MiniBlock{} +} + func (ith *IntermediateTransactionHandlerMock) GetAllCurrentFinishedTxs() map[string]data.TransactionHandler { if ith.GetAllCurrentFinishedTxsCalled != nil { return ith.GetAllCurrentFinishedTxsCalled() diff --git a/integrationTests/mock/pendingMiniBlocksHandlerStub.go b/integrationTests/mock/pendingMiniBlocksHandlerStub.go new file mode 100644 index 00000000000..2abc7e96155 --- /dev/null +++ b/integrationTests/mock/pendingMiniBlocksHandlerStub.go @@ -0,0 +1,37 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" +) + +type PendingMiniBlocksHandlerStub struct { + PendingMiniBlockHeadersCalled func(lastNotarizedHeaders []data.HeaderHandler) ([]block.ShardMiniBlockHeader, error) + AddProcessedHeaderCalled func(handler data.HeaderHandler) error + RevertHeaderCalled func(handler data.HeaderHandler) error +} + +func (p *PendingMiniBlocksHandlerStub) PendingMiniBlockHeaders(lastNotarizedHeaders []data.HeaderHandler) ([]block.ShardMiniBlockHeader, error) { + if p.PendingMiniBlockHeadersCalled != nil { + return p.PendingMiniBlockHeadersCalled(lastNotarizedHeaders) + } + return nil, nil +} + +func (p *PendingMiniBlocksHandlerStub) AddProcessedHeader(handler data.HeaderHandler) error { + if p.AddProcessedHeaderCalled != nil { + return p.AddProcessedHeaderCalled(handler) + } + return nil +} + +func (p *PendingMiniBlocksHandlerStub) RevertHeader(handler data.HeaderHandler) error { + if p.RevertHeaderCalled != nil { + return p.RevertHeaderCalled(handler) + } + return nil +} + +func (p *PendingMiniBlocksHandlerStub) IsInterfaceNil() bool { + return p == nil +} diff --git a/integrationTests/mock/poolsHolderStub.go b/integrationTests/mock/poolsHolderStub.go index 43599982ea8..e520295f70e 100644 --- a/integrationTests/mock/poolsHolderStub.go +++ b/integrationTests/mock/poolsHolderStub.go @@ -6,23 +6,17 @@ import ( ) type PoolsHolderStub struct { - HeadersCalled func() storage.Cacher - HeadersNoncesCalled func() dataRetriever.Uint64SyncMapCacher + HeadersCalled func() dataRetriever.HeadersPool PeerChangesBlocksCalled func() storage.Cacher TransactionsCalled func() dataRetriever.ShardedDataCacherNotifier UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier MiniBlocksCalled func() storage.Cacher - MetaBlocksCalled func() storage.Cacher } -func (phs *PoolsHolderStub) Headers() storage.Cacher { +func (phs *PoolsHolderStub) Headers() dataRetriever.HeadersPool { return phs.HeadersCalled() } -func (phs *PoolsHolderStub) HeadersNonces() dataRetriever.Uint64SyncMapCacher { - return phs.HeadersNoncesCalled() -} - func (phs *PoolsHolderStub) PeerChangesBlocks() storage.Cacher { return phs.PeerChangesBlocksCalled() } @@ -35,10 +29,6 @@ func (phs *PoolsHolderStub) MiniBlocks() storage.Cacher { return phs.MiniBlocksCalled() } -func (phs *PoolsHolderStub) MetaBlocks() storage.Cacher { - return phs.MetaBlocksCalled() -} - func (phs *PoolsHolderStub) UnsignedTransactions() dataRetriever.ShardedDataCacherNotifier { return phs.UnsignedTransactionsCalled() } diff --git a/integrationTests/mock/shardedDataStub.go b/integrationTests/mock/shardedDataStub.go index 688a94904dd..3fa0868838e 100644 --- a/integrationTests/mock/shardedDataStub.go +++ b/integrationTests/mock/shardedDataStub.go @@ -47,10 +47,6 @@ func (sd *ShardedDataStub) MergeShardStores(sourceCacheId, destCacheId string) { sd.MergeShardStoresCalled(sourceCacheId, destCacheId) } -func (sd 
*ShardedDataStub) MoveData(sourceCacheId, destCacheId string, key [][]byte) { - sd.MoveDataCalled(sourceCacheId, destCacheId, key) -} - func (sd *ShardedDataStub) Clear() { sd.ClearCalled() } diff --git a/integrationTests/mock/transactionCoordinatorMock.go b/integrationTests/mock/transactionCoordinatorMock.go index 3769e30b714..2758f3d0a07 100644 --- a/integrationTests/mock/transactionCoordinatorMock.go +++ b/integrationTests/mock/transactionCoordinatorMock.go @@ -22,7 +22,11 @@ type TransactionCoordinatorMock struct { CreateMbsAndProcessTransactionsFromMeCalled func(maxTxRemaining uint32, maxMbRemaining uint32, haveTime func() bool) block.MiniBlockSlice CreateMarshalizedDataCalled func(body block.Body) (map[uint32]block.MiniBlockSlice, map[string][][]byte) GetAllCurrentUsedTxsCalled func(blockType block.Type) map[string]data.TransactionHandler - VerifyCreatedBlockTransactionsCalled func(body block.Body) error + VerifyCreatedBlockTransactionsCalled func(hdr data.HeaderHandler, body block.Body) error +} + +func (tcm *TransactionCoordinatorMock) CreateReceiptsHash() ([]byte, error) { + return []byte("receiptHash"), nil } func (tcm *TransactionCoordinatorMock) ComputeTransactionType(tx data.TransactionHandler) (process.TransactionType, error) { @@ -129,12 +133,12 @@ func (tcm *TransactionCoordinatorMock) GetAllCurrentUsedTxs(blockType block.Type return tcm.GetAllCurrentUsedTxsCalled(blockType) } -func (tcm *TransactionCoordinatorMock) VerifyCreatedBlockTransactions(body block.Body) error { +func (tcm *TransactionCoordinatorMock) VerifyCreatedBlockTransactions(hdr data.HeaderHandler, body block.Body) error { if tcm.VerifyCreatedBlockTransactionsCalled == nil { return nil } - return tcm.VerifyCreatedBlockTransactionsCalled(body) + return tcm.VerifyCreatedBlockTransactionsCalled(hdr, body) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/integrationTests/multiShard/block/executingMiniblocksSc_test.go b/integrationTests/multiShard/block/executingMiniblocksSc_test.go index ed43a1cd601..5b68f5da992 100644 --- a/integrationTests/multiShard/block/executingMiniblocksSc_test.go +++ b/integrationTests/multiShard/block/executingMiniblocksSc_test.go @@ -89,7 +89,7 @@ func TestProcessWithScTxsTopUpAndWithdrawOnlyProposers(t *testing.T) { nodes[idxNodeShard1].OwnAccount.Nonce, factory.IELEVirtualMachine, ) - integrationTests.DeployScTx(nodes, idxNodeShard1, string(scCode)) + integrationTests.DeployScTx(nodes, idxNodeShard1, string(scCode), factory.IELEVirtualMachine) integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) round = integrationTests.IncrementAndPrintRound(round) @@ -234,7 +234,7 @@ func TestProcessWithScTxsJoinAndRewardTwoNodesInShard(t *testing.T) { rewardValue := big.NewInt(10) integrationTests.MintAllNodes(nodes, initialVal) - integrationTests.DeployScTx(nodes, idxProposerShard1, string(scCode)) + integrationTests.DeployScTx(nodes, idxProposerShard1, string(scCode), factory.IELEVirtualMachine) round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) @@ -246,7 +246,7 @@ func TestProcessWithScTxsJoinAndRewardTwoNodesInShard(t *testing.T) { hardCodedScResultingAddress, ) - roundsToWait := 6 + roundsToWait := 7 for i := 0; i < roundsToWait; i++ { round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) idxValidators, idxProposers = idxProposers, idxValidators @@ -267,13 +267,14 @@ func TestProcessWithScTxsJoinAndRewardTwoNodesInShard(t *testing.T) { hardCodedScResultingAddress, 
) - //TODO investigate why do we need 7 rounds here roundsToWait = 7 for i := 0; i < roundsToWait; i++ { round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) idxValidators, idxProposers = idxProposers, idxValidators } + time.Sleep(time.Second) + _ = integrationTests.CheckBalanceIsDoneCorrectlySCSideAndReturnExpectedVal(t, nodes, idxProposerShard1, topUpValue, big.NewInt(0), hardCodedScResultingAddress) integrationTests.CheckSenderBalanceOkAfterTopUpAndWithdraw(t, nodeWithCaller, initialVal, topUpValue, big.NewInt(0)) integrationTests.CheckRootHashes(t, nodes, idxProposers) @@ -354,7 +355,7 @@ func TestShouldProcessWithScTxsJoinNoCommitShouldProcessedByValidators(t *testin topUpValue := big.NewInt(500) integrationTests.MintAllNodes(nodes, initialVal) - integrationTests.DeployScTx(nodes, idxProposerShard1, string(scCode)) + integrationTests.DeployScTx(nodes, idxProposerShard1, string(scCode), factory.IELEVirtualMachine) round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) integrationTests.PlayerJoinsGame( @@ -453,7 +454,7 @@ func TestShouldSubtractTheCorrectTxFee(t *testing.T) { nodeShard0 := nodesMap[0][0] txData := "DEADBEEF@" + hex.EncodeToString(factory.InternalTestingVM) + "@00" dummyTx := &transaction.Transaction{ - Data: txData, + Data: []byte(txData), } gasLimit := nodeShard0.EconomicsData.ComputeGasLimit(dummyTx) gasLimit += integrationTests.OpGasValueForMockVm @@ -473,14 +474,15 @@ func TestShouldSubtractTheCorrectTxFee(t *testing.T) { ) randomness := generateInitialRandomness(uint32(maxShards)) - _, _, _, _ = integrationTests.AllShardsProposeBlock(round, nonce, randomness, nodesMap) - leaderPkBytes := nodeShard0.SpecialAddressHandler.LeaderAddress() + _, _, consensusNodes, _ := integrationTests.AllShardsProposeBlock(round, nonce, randomness, nodesMap) + shardId0 := uint32(0) + leaderPkBytes := consensusNodes[shardId0][0].SpecialAddressHandler.LeaderAddress() leaderAddress, _ := integrationTests.TestAddressConverter.CreateAddressFromPublicKeyBytes(leaderPkBytes) _ = integrationTests.IncrementAndPrintRound(round) // test sender account decreased its balance with gasPrice * gasLimit - accnt, err := nodeShard0.AccntState.GetExistingAccount(ownerAddr) + accnt, err := consensusNodes[shardId0][0].AccntState.GetExistingAccount(ownerAddr) assert.Nil(t, err) ownerAccnt := accnt.(*state.Account) expectedBalance := big.NewInt(0).Set(initialVal) @@ -488,9 +490,9 @@ func TestShouldSubtractTheCorrectTxFee(t *testing.T) { expectedBalance.Sub(expectedBalance, txCost) assert.Equal(t, expectedBalance, ownerAccnt.Balance) - printContainingTxs(nodeShard0, nodeShard0.BlockChain.GetCurrentBlockHeader().(*block.Header)) + printContainingTxs(consensusNodes[shardId0][0], consensusNodes[shardId0][0].BlockChain.GetCurrentBlockHeader().(*block.Header)) - accnt, err = nodeShard0.AccntState.GetExistingAccount(leaderAddress) + accnt, err = consensusNodes[shardId0][0].AccntState.GetExistingAccount(leaderAddress) assert.Nil(t, err) leaderAccnt := accnt.(*state.Account) expectedBalance = big.NewInt(0).Set(txCost) diff --git a/integrationTests/multiShard/block/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks_test.go index 53e81bf35b4..a6b4de7427b 100644 --- a/integrationTests/multiShard/block/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks_test.go @@ -8,8 +8,11 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/crypto" + 
"github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" ) func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { @@ -134,3 +137,205 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { } } } + +func TestSimpleTransactionsWithMoreGasWhichYieldInReceiptsInMultiShardedEnvironment(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfShards := 2 + nodesPerShard := 3 + numMetachainNodes := 2 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + nodes := integrationTests.CreateNodes( + numOfShards, + nodesPerShard, + numMetachainNodes, + integrationTests.GetConnectableAddress(advertiser), + ) + + minGasLimit := uint64(10000) + for _, node := range nodes { + node.EconomicsData.SetMinGasLimit(minGasLimit) + } + + idxProposers := make([]int, numOfShards+1) + for i := 0; i < numOfShards; i++ { + idxProposers[i] = i * nodesPerShard + } + idxProposers[numOfShards] = numOfShards * nodesPerShard + + integrationTests.DisplayAndStartNodes(nodes) + + defer func() { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Node.Stop() + } + }() + + initialVal := big.NewInt(10000000) + sendValue := big.NewInt(5) + integrationTests.MintAllNodes(nodes, initialVal) + receiverAddress := []byte("12345678901234567890123456789012") + + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + gasLimit := minGasLimit * 2 + time.Sleep(time.Second) + nrRoundsToTest := 10 + for i := 0; i <= nrRoundsToTest; i++ { + integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) + integrationTests.SyncBlock(t, nodes, idxProposers, round) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + for _, node := range nodes { + integrationTests.CreateAndSendTransactionWithGasLimit(node, sendValue, gasLimit, receiverAddress, []byte("")) + } + + time.Sleep(2 * time.Second) + } + + time.Sleep(time.Second) + + txGasNeed := nodes[0].EconomicsData.GetMinGasLimit() + txGasPrice := nodes[0].EconomicsData.GetMinGasPrice() + + oneTxCost := big.NewInt(0).Add(sendValue, big.NewInt(0).SetUint64(txGasNeed*txGasPrice)) + txTotalCost := big.NewInt(0).Mul(oneTxCost, big.NewInt(int64(nrRoundsToTest))) + + expectedBalance := big.NewInt(0).Sub(initialVal, txTotalCost) + for _, verifierNode := range nodes { + for _, node := range nodes { + accWrp, err := verifierNode.AccntState.GetExistingAccount(node.OwnAccount.Address) + if err != nil { + continue + } + + account, _ := accWrp.(*state.Account) + assert.Equal(t, expectedBalance, account.Balance) + } + } +} + +func TestSimpleTransactionsWithMoreValueThanBalanceYieldReceiptsInMultiShardedEnvironment(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfShards := 2 + nodesPerShard := 2 + numMetachainNodes := 2 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + nodes := integrationTests.CreateNodes( + numOfShards, + nodesPerShard, + numMetachainNodes, + integrationTests.GetConnectableAddress(advertiser), + ) + + minGasLimit := uint64(10000) + for _, node := range nodes { + node.EconomicsData.SetMinGasLimit(minGasLimit) + } + + idxProposers := make([]int, numOfShards+1) + for i := 0; i < numOfShards; i++ { + 
idxProposers[i] = i * nodesPerShard + } + idxProposers[numOfShards] = numOfShards * nodesPerShard + + integrationTests.DisplayAndStartNodes(nodes) + + defer func() { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Node.Stop() + } + }() + + nrTxsToSend := uint64(10) + initialVal := big.NewInt(0).SetUint64(nrTxsToSend * minGasLimit * integrationTests.MinTxGasPrice) + halfInitVal := big.NewInt(0).Div(initialVal, big.NewInt(2)) + integrationTests.MintAllNodes(nodes, initialVal) + receiverAddress := []byte("12345678901234567890123456789012") + + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + for _, node := range nodes { + for j := uint64(0); j < nrTxsToSend; j++ { + integrationTests.CreateAndSendTransactionWithGasLimit(node, halfInitVal, minGasLimit, receiverAddress, []byte("")) + } + } + + time.Sleep(2 * time.Second) + + integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) + integrationTests.SyncBlock(t, nodes, idxProposers, round) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + for _, node := range nodes { + if node.ShardCoordinator.SelfId() == sharding.MetachainShardId { + continue + } + + bodyHandler := node.BlockChain.GetCurrentBlockBody() + body, _ := bodyHandler.(block.Body) + numInvalid := 0 + for _, mb := range body { + if mb.Type == block.InvalidBlock { + numInvalid++ + } + } + assert.Equal(t, 1, numInvalid) + } + + time.Sleep(time.Second) + numRoundsToTest := 6 + for i := 0; i < numRoundsToTest; i++ { + integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) + integrationTests.SyncBlock(t, nodes, idxProposers, round) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + time.Sleep(time.Second) + } + + time.Sleep(time.Second) + + expectedReceiverValue := big.NewInt(0).Mul(big.NewInt(int64(len(nodes))), halfInitVal) + for _, verifierNode := range nodes { + for _, node := range nodes { + accWrp, err := verifierNode.AccntState.GetExistingAccount(node.OwnAccount.Address) + if err != nil { + continue + } + + account, _ := accWrp.(*state.Account) + assert.Equal(t, big.NewInt(0), account.Balance) + } + + receiver, _ := integrationTests.TestAddressConverter.CreateAddressFromPublicKeyBytes(receiverAddress) + accWrp, err := verifierNode.AccntState.GetExistingAccount(receiver) + if err != nil { + continue + } + + account, _ := accWrp.(*state.Account) + assert.Equal(t, expectedReceiverValue, account.Balance) + } +} diff --git a/integrationTests/multiShard/block/interceptedBlocks_test.go b/integrationTests/multiShard/block/interceptedBlocks_test.go index 1a73770cd0a..04f33f20a13 100644 --- a/integrationTests/multiShard/block/interceptedBlocks_test.go +++ b/integrationTests/multiShard/block/interceptedBlocks_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/process/factory" @@ -153,14 +154,14 @@ func TestMetaHeadersAreRequstedOnlyFromMetachain(t *testing.T) { for _, n := range nodes { if n.ShardCoordinator.SelfId() != sharding.MetachainShardId { - n.ShardDataPool.MetaBlocks().Put(metaHdrFromShardHash, metaHdrFromShard) + n.ShardDataPool.Headers().AddHeader(metaHdrFromShardHash, metaHdrFromShard) } } chanReceived := make(chan struct{}, 1000) - node4Meta.MetaDataPool.MetaBlocks().Put(metaHdrHashFromMetachain, metaHdrFromMetachain) - 
node1Shard0.ShardDataPool.MetaBlocks().Clear() - node1Shard0.ShardDataPool.MetaBlocks().RegisterHandler(func(key []byte) { + node4Meta.MetaDataPool.Headers().AddHeader(metaHdrHashFromMetachain, metaHdrFromMetachain) + node1Shard0.ShardDataPool.Headers().Clear() + node1Shard0.ShardDataPool.Headers().RegisterHandler(func(header data.HeaderHandler, key []byte) { chanReceived <- struct{}{} }) @@ -187,7 +188,7 @@ func requestAndRetrieveMetaHeader( return nil } - retrievedObject, _ := node.ShardDataPool.MetaBlocks().Get(hash) + retrievedObject, _ := node.ShardDataPool.Headers().GetHeaderByHash(hash) return retrievedObject.(*block.MetaBlock) } diff --git a/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go b/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go index e79b742caca..6f5c4ef8eb1 100644 --- a/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go +++ b/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go @@ -2,9 +2,7 @@ package block import ( "context" - "encoding/json" "fmt" - "strings" "testing" "time" @@ -12,9 +10,8 @@ import ( "github.com/ElrondNetwork/elrond-go/crypto/signing" "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" - "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/integrationTests" - "github.com/ElrondNetwork/elrond-go/integrationTests/mock" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) @@ -30,6 +27,7 @@ func TestInterceptedShardBlockHeaderVerifiedWithCorrectConsensusGroup(t *testing nbMetaNodes := 4 nbShards := 1 consensusGroupSize := 3 + singleSigner := &singlesig.BlsSingleSigner{} advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") _ = advertiser.Bootstrap() @@ -67,7 +65,7 @@ func TestInterceptedShardBlockHeaderVerifiedWithCorrectConsensusGroup(t *testing nonce := uint64(1) body, header, _, _ := integrationTests.ProposeBlockWithConsensusSignature(0, nodesMap, round, nonce, randomness) - + header = fillHeaderFields(nodesMap[0][0], header, singleSigner) nodesMap[0][0].BroadcastBlock(body, header) time.Sleep(broadcastDelay) @@ -77,15 +75,15 @@ func TestInterceptedShardBlockHeaderVerifiedWithCorrectConsensusGroup(t *testing // all nodes in metachain have the block header in pool as interceptor validates it for _, metaNode := range nodesMap[sharding.MetachainShardId] { - v, ok := metaNode.MetaDataPool.ShardHeaders().Get(headerHash) - assert.True(t, ok) + v, err := metaNode.MetaDataPool.Headers().GetHeaderByHash(headerHash) + assert.Nil(t, err) assert.Equal(t, header, v) } // all nodes in shard have the block in pool as interceptor validates it for _, shardNode := range nodesMap[0] { - v, ok := shardNode.ShardDataPool.Headers().Get(headerHash) - assert.True(t, ok) + v, err := shardNode.ShardDataPool.Headers().GetHeaderByHash(headerHash) + assert.Nil(t, err) assert.Equal(t, header, v) } } @@ -152,15 +150,15 @@ func TestInterceptedMetaBlockVerifiedWithCorrectConsensusGroup(t *testing.T) { // all nodes in metachain do not have the block in pool as interceptor does not validate it with a wrong consensus for _, metaNode := range nodesMap[sharding.MetachainShardId] { - v, ok := metaNode.MetaDataPool.MetaBlocks().Get(headerHash) - assert.True(t, ok) + v, err := metaNode.MetaDataPool.Headers().GetHeaderByHash(headerHash) + assert.Nil(t, err) assert.Equal(t, header, v) 
} // all nodes in shard do not have the block in pool as interceptor does not validate it with a wrong consensus for _, shardNode := range nodesMap[0] { - v, ok := shardNode.ShardDataPool.MetaBlocks().Get(headerHash) - assert.True(t, ok) + v, err := shardNode.ShardDataPool.Headers().GetHeaderByHash(headerHash) + assert.Nil(t, err) assert.Equal(t, header, v) } } @@ -180,7 +178,7 @@ func TestInterceptedShardBlockHeaderWithLeaderSignatureAndRandSeedChecks(t *test seedAddress := integrationTests.GetConnectableAddress(advertiser) - singleSigner := getBlockSingleSignerStub() + singleSigner := &singlesig.BlsSingleSigner{} keyGen := signing.NewKeyGenerator(kyber.NewSuitePairingBn256()) // create map of shard - testNodeProcessors for metachain and shard chain nodesMap := integrationTests.CreateNodesWithNodesCoordinatorKeygenAndSingleSigner( @@ -214,9 +212,12 @@ func TestInterceptedShardBlockHeaderWithLeaderSignatureAndRandSeedChecks(t *test round := uint64(1) nonce := uint64(1) - body, header, _, _ := integrationTests.ProposeBlockWithConsensusSignature(0, nodesMap, round, nonce, randomness) + nodeToSendFrom := nodesMap[0][0] - nodesMap[0][0].BroadcastBlock(body, header) + body, header, _, _ := integrationTests.ProposeBlockWithConsensusSignature(0, nodesMap, round, nonce, randomness) + header.SetPrevRandSeed(randomness) + header = fillHeaderFields(nodeToSendFrom, header, singleSigner) + nodeToSendFrom.BroadcastBlock(body, header) time.Sleep(broadcastDelay) @@ -225,20 +226,20 @@ func TestInterceptedShardBlockHeaderWithLeaderSignatureAndRandSeedChecks(t *test // all nodes in metachain have the block header in pool as interceptor validates it for _, metaNode := range nodesMap[sharding.MetachainShardId] { - v, ok := metaNode.MetaDataPool.ShardHeaders().Get(headerHash) - assert.True(t, ok) + v, err := metaNode.MetaDataPool.Headers().GetHeaderByHash(headerHash) + assert.Nil(t, err) assert.Equal(t, header, v) } // all nodes in shard have the block in pool as interceptor validates it for _, shardNode := range nodesMap[0] { - v, ok := shardNode.ShardDataPool.Headers().Get(headerHash) - assert.True(t, ok) + v, err := shardNode.ShardDataPool.Headers().GetHeaderByHash(headerHash) + assert.Nil(t, err) assert.Equal(t, header, v) } } -func TestInterceptedShardHeaderBlockWithWrongPreviousRandSeendShouldNotBeAccepted(t *testing.T) { +func TestInterceptedShardHeaderBlockWithWrongPreviousRandSeedShouldNotBeAccepted(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") } @@ -253,7 +254,7 @@ func TestInterceptedShardHeaderBlockWithWrongPreviousRandSeendShouldNotBeAccepte seedAddress := integrationTests.GetConnectableAddress(advertiser) - singleSigner := getBlockSingleSignerStub() + singleSigner := &singlesig.BlsSingleSigner{} keyGen := signing.NewKeyGenerator(kyber.NewSuitePairingBn256()) // create map of shard - testNodeProcessors for metachain and shard chain nodesMap := integrationTests.CreateNodesWithNodesCoordinatorKeygenAndSingleSigner( @@ -296,36 +297,28 @@ func TestInterceptedShardHeaderBlockWithWrongPreviousRandSeendShouldNotBeAccepte // all nodes in metachain have the block header in pool as interceptor validates it for _, metaNode := range nodesMap[sharding.MetachainShardId] { - _, ok := metaNode.MetaDataPool.ShardHeaders().Get(headerHash) - assert.False(t, ok) + _, err := metaNode.MetaDataPool.Headers().GetHeaderByHash(headerHash) + assert.Error(t, err) } // all nodes in shard have the block in pool as interceptor validates it for _, shardNode := range nodesMap[0] { - _, ok := 
shardNode.ShardDataPool.Headers().Get(headerHash) - assert.False(t, ok) + _, err := shardNode.ShardDataPool.Headers().GetHeaderByHash(headerHash) + assert.Error(t, err) } } -func getBlockSingleSignerStub() crypto.SingleSigner { - singleSigner := singlesig.BlsSingleSigner{} - return &mock.SignerMock{ - SignStub: func(private crypto.PrivateKey, msg []byte) ([]byte, error) { - return singleSigner.Sign(private, msg) - }, - VerifyStub: func(public crypto.PublicKey, msg []byte, sig []byte) error { - // if rand seed is verified, return nil if the message contains "random seed" - if strings.Contains(string(msg), "random seed") { - return nil - } - // if leader signature is verified, return nil if the signature contains "leader sign" - if strings.Contains(string(sig), "leader sign") { - var hdr block.Header - // if the marshalized struct is a header, everything is ok - err := json.Unmarshal(msg, &hdr) - return err - } - return singleSigner.Verify(public, msg, sig) - }, - } +func fillHeaderFields(proposer *integrationTests.TestProcessorNode, hdr data.HeaderHandler, signer crypto.SingleSigner) data.HeaderHandler { + leaderSk := proposer.NodeKeys.Sk + + randSeed, _ := signer.Sign(leaderSk, hdr.GetPrevRandSeed()) + hdr.SetRandSeed(randSeed) + + hdrClone := hdr.Clone() + hdrClone.SetLeaderSignature(nil) + headerJsonBytes, _ := integrationTests.TestMarshalizer.Marshal(hdrClone) + leaderSign, _ := signer.Sign(leaderSk, headerJsonBytes) + hdr.SetLeaderSignature(leaderSign) + + return hdr } diff --git a/integrationTests/multiShard/endOfEpoch/executingEpochChange_test.go b/integrationTests/multiShard/endOfEpoch/executingEpochChange_test.go new file mode 100644 index 00000000000..8b0c61374d9 --- /dev/null +++ b/integrationTests/multiShard/endOfEpoch/executingEpochChange_test.go @@ -0,0 +1,311 @@ +package epochStart + +import ( + "context" + "math/big" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" +) + +func TestEpochStartChangeWithoutTransactionInMultiShardedEnvironment(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfShards := 2 + nodesPerShard := 3 + numMetachainNodes := 3 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + nodes := integrationTests.CreateNodes( + numOfShards, + nodesPerShard, + numMetachainNodes, + integrationTests.GetConnectableAddress(advertiser), + ) + + roundsPerEpoch := uint64(10) + for _, node := range nodes { + node.EpochStartTrigger.SetRoundsPerEpoch(roundsPerEpoch) + } + + idxProposers := make([]int, numOfShards+1) + for i := 0; i < numOfShards; i++ { + idxProposers[i] = i * nodesPerShard + } + idxProposers[numOfShards] = numOfShards * nodesPerShard + + integrationTests.DisplayAndStartNodes(nodes) + + defer func() { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Node.Stop() + } + }() + + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + time.Sleep(time.Second) + + nrRoundsToPropagateMultiShard := 5 + /////////----- wait for epoch end period + for i := uint64(0); i <= roundsPerEpoch; i++ { + integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) + integrationTests.SyncBlock(t, nodes, idxProposers, round) + round = 
integrationTests.IncrementAndPrintRound(round) + nonce++ + } + + time.Sleep(time.Second) + + for i := 0; i < nrRoundsToPropagateMultiShard; i++ { + integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) + integrationTests.SyncBlock(t, nodes, idxProposers, round) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + } + + time.Sleep(time.Second) + + epoch := uint32(1) + verifyIfNodesHasCorrectEpoch(t, epoch, nodes) + verifyIfAddedShardHeadersAreWithNewEpoch(t, nodes) +} + +func TestEpochStartChangeWithContinuousTransactionsInMultiShardedEnvironment(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfShards := 2 + nodesPerShard := 3 + numMetachainNodes := 3 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + nodes := integrationTests.CreateNodes( + numOfShards, + nodesPerShard, + numMetachainNodes, + integrationTests.GetConnectableAddress(advertiser), + ) + + roundsPerEpoch := uint64(10) + for _, node := range nodes { + node.EpochStartTrigger.SetRoundsPerEpoch(roundsPerEpoch) + } + + idxProposers := make([]int, numOfShards+1) + for i := 0; i < numOfShards; i++ { + idxProposers[i] = i * nodesPerShard + } + idxProposers[numOfShards] = numOfShards * nodesPerShard + + integrationTests.DisplayAndStartNodes(nodes) + + defer func() { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Node.Stop() + } + }() + + initialVal := big.NewInt(10000000) + sendValue := big.NewInt(5) + integrationTests.MintAllNodes(nodes, initialVal) + receiverAddress := []byte("12345678901234567890123456789012") + + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + time.Sleep(time.Second) + + /////////----- wait for epoch end period + epoch := uint32(2) + nrRoundsToPropagateMultiShard := uint64(5) + for i := uint64(0); i <= (uint64(epoch)*roundsPerEpoch)+nrRoundsToPropagateMultiShard; i++ { + integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) + integrationTests.SyncBlock(t, nodes, idxProposers, round) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + for _, node := range nodes { + integrationTests.CreateAndSendTransaction(node, sendValue, receiverAddress, "") + } + + time.Sleep(time.Second) + } + + time.Sleep(time.Second) + + verifyIfNodesHasCorrectEpoch(t, epoch, nodes) + verifyIfAddedShardHeadersAreWithNewEpoch(t, nodes) +} + +func verifyIfNodesHasCorrectEpoch( + t *testing.T, + epoch uint32, + nodes []*integrationTests.TestProcessorNode, +) { + for _, node := range nodes { + currentShId := node.ShardCoordinator.SelfId() + currentHeader := node.BlockChain.GetCurrentBlockHeader() + assert.Equal(t, epoch, currentHeader.GetEpoch()) + + for _, testNode := range nodes { + if testNode.ShardCoordinator.SelfId() == currentShId { + testHeader := testNode.BlockChain.GetCurrentBlockHeader() + assert.Equal(t, testHeader.GetNonce(), currentHeader.GetNonce()) + } + } + } +} + +func verifyIfAddedShardHeadersAreWithNewEpoch( + t *testing.T, + nodes []*integrationTests.TestProcessorNode, +) { + for _, node := range nodes { + if node.ShardCoordinator.SelfId() != sharding.MetachainShardId { + continue + } + + currentMetaHdr, ok := node.BlockChain.GetCurrentBlockHeader().(*block.MetaBlock) + if !ok { + assert.Fail(t, "metablock should have been in current block header") + } + + shardHDrStorage := node.Storage.GetStorer(dataRetriever.BlockHeaderUnit) + for _, shardInfo := range currentMetaHdr.ShardInfo { + value, 
err := node.MetaDataPool.Headers().GetHeaderByHash(shardInfo.HeaderHash) + if err == nil { + header, ok := value.(data.HeaderHandler) + if !ok { + assert.Fail(t, "wrong type in shard header pool") + } + + assert.Equal(t, header.GetEpoch(), currentMetaHdr.GetEpoch()) + continue + } + + buff, err := shardHDrStorage.Get(shardInfo.HeaderHash) + assert.Nil(t, err) + + shardHeader := block.Header{} + err = integrationTests.TestMarshalizer.Unmarshal(&shardHeader, buff) + assert.Nil(t, err) + assert.Equal(t, shardHeader.Epoch, currentMetaHdr.Epoch) + } + } +} + +func TestExecuteBlocksWithTransactionsAndCheckRewards(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + nodesPerShard := 4 + nbMetaNodes := 2 + nbShards := 2 + consensusGroupSize := 2 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + seedAddress := integrationTests.GetConnectableAddress(advertiser) + + // create map of shard - testNodeProcessors for metachain and shard chain + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( + nodesPerShard, + nbMetaNodes, + nbShards, + consensusGroupSize, + consensusGroupSize, + seedAddress, + ) + roundsPerEpoch := uint64(5) + maxGasLimitPerBlock := uint64(100000) + gasPrice := uint64(10) + gasLimit := uint64(100) + for _, nodes := range nodesMap { + integrationTests.SetEconomicsParameters(nodes, maxGasLimitPerBlock, gasPrice, gasLimit) + integrationTests.DisplayAndStartNodes(nodes) + + for _, node := range nodes { + node.EpochStartTrigger.SetRoundsPerEpoch(roundsPerEpoch) + } + } + + defer func() { + _ = advertiser.Close() + for _, nodes := range nodesMap { + for _, n := range nodes { + _ = n.Node.Stop() + } + } + }() + + round := uint64(1) + nonce := uint64(1) + nbBlocksProduced := 2 * roundsPerEpoch + + randomness := generateInitialRandomness(uint32(nbShards)) + var consensusNodes map[uint32][]*integrationTests.TestProcessorNode + + for i := uint64(0); i < nbBlocksProduced; i++ { + _, _, consensusNodes, randomness = integrationTests.AllShardsProposeBlock(round, nonce, randomness, nodesMap) + + indexesProposers := getBlockProposersIndexes(consensusNodes, nodesMap) + integrationTests.SyncAllShardsWithRoundBlock(t, nodesMap, indexesProposers, round) + round++ + nonce++ + } + + time.Sleep(5 * time.Second) +} + +func generateInitialRandomness(nbShards uint32) map[uint32][]byte { + randomness := make(map[uint32][]byte) + + for i := uint32(0); i < nbShards; i++ { + randomness[i] = []byte("root hash") + } + + randomness[sharding.MetachainShardId] = []byte("root hash") + + return randomness +} + +func getBlockProposersIndexes( + consensusMap map[uint32][]*integrationTests.TestProcessorNode, + nodesMap map[uint32][]*integrationTests.TestProcessorNode, +) map[uint32]int { + + indexProposer := make(map[uint32]int) + + for sh, testNodeList := range nodesMap { + for k, testNode := range testNodeList { + if consensusMap[sh][0] == testNode { + indexProposer[sh] = k + } + } + } + + return indexProposer +} diff --git a/integrationTests/multiShard/metablock/blocksDissemination_test.go b/integrationTests/multiShard/metablock/blocksDissemination_test.go index 2af48124ad1..d9d970929bf 100644 --- a/integrationTests/multiShard/metablock/blocksDissemination_test.go +++ b/integrationTests/multiShard/metablock/blocksDissemination_test.go @@ -7,11 +7,9 @@ import ( "testing" "time" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers" 
"github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) @@ -68,7 +66,7 @@ func TestHeadersAreReceivedByMetachainAndShard(t *testing.T) { //all node should have received the meta header for _, n := range nodes { - assert.Equal(t, int32(1), atomic.LoadInt32(&n.CounterMetaRcv)) + assert.Equal(t, int32(2), atomic.LoadInt32(&n.CounterHdrRecv)) } } @@ -107,12 +105,12 @@ func TestHeadersAreResolvedByMetachainAndShard(t *testing.T) { _, hdr, _ := nodes[0].ProposeBlock(1, 1) shardHeaderBytes, _ := integrationTests.TestMarshalizer.Marshal(hdr) shardHeaderHash := integrationTests.TestHasher.Compute(string(shardHeaderBytes)) - nodes[0].ShardDataPool.Headers().HasOrAdd(shardHeaderHash, hdr) + nodes[0].ShardDataPool.Headers().AddHeader(shardHeaderHash, hdr) maxNumRequests := 5 for i := 0; i < maxNumRequests; i++ { for j := 0; j < numMetaNodes; j++ { - resolver, err := nodes[j+1].ResolverFinder.CrossShardResolver(factory.ShardHeadersForMetachainTopic, senderShard) + resolver, err := nodes[j+1].ResolverFinder.CrossShardResolver(factory.ShardBlocksTopic, senderShard) assert.Nil(t, err) _ = resolver.RequestDataFromHash(shardHeaderHash) } @@ -132,7 +130,7 @@ func TestHeadersAreResolvedByMetachainAndShard(t *testing.T) { metaHeaderBytes, _ := integrationTests.TestMarshalizer.Marshal(metaHdr) metaHeaderHash := integrationTests.TestHasher.Compute(string(metaHeaderBytes)) for i := 0; i < numMetaNodes; i++ { - nodes[i+1].MetaDataPool.MetaBlocks().HasOrAdd(metaHeaderHash, metaHdr) + nodes[i+1].MetaDataPool.Headers().AddHeader(metaHeaderHash, metaHdr) } for i := 0; i < maxNumRequests; i++ { @@ -147,7 +145,7 @@ func TestHeadersAreResolvedByMetachainAndShard(t *testing.T) { //all node should have received the meta header for _, n := range nodes { - assert.Equal(t, int32(1), atomic.LoadInt32(&n.CounterMetaRcv)) + assert.Equal(t, int32(2), atomic.LoadInt32(&n.CounterHdrRecv)) } fmt.Println("Generating meta header, save it in meta datapools and shard 0 node requests it after its nonce...") @@ -156,11 +154,7 @@ func TestHeadersAreResolvedByMetachainAndShard(t *testing.T) { metaHeaderBytes2, _ := integrationTests.TestMarshalizer.Marshal(metaHdr2) metaHeaderHash2 := integrationTests.TestHasher.Compute(string(metaHeaderBytes2)) for i := 0; i < numMetaNodes; i++ { - nodes[i+1].MetaDataPool.MetaBlocks().HasOrAdd(metaHeaderHash2, metaHdr2) - - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(sharding.MetachainShardId, metaHeaderHash2) - nodes[i+1].MetaDataPool.HeadersNonces().Merge(metaHdr2.GetNonce(), syncMap) + nodes[i+1].MetaDataPool.Headers().AddHeader(metaHeaderHash2, metaHdr2) } for i := 0; i < maxNumRequests; i++ { @@ -175,6 +169,6 @@ func TestHeadersAreResolvedByMetachainAndShard(t *testing.T) { //all node should have received the meta header for _, n := range nodes { - assert.Equal(t, int32(2), atomic.LoadInt32(&n.CounterMetaRcv)) + assert.Equal(t, int32(3), atomic.LoadInt32(&n.CounterHdrRecv)) } } diff --git a/integrationTests/multiShard/smartContract/scCallingSC_test.go b/integrationTests/multiShard/smartContract/scCallingSC_test.go index 5c339aa1cd4..fe104fd9ac4 100644 --- a/integrationTests/multiShard/smartContract/scCallingSC_test.go +++ b/integrationTests/multiShard/smartContract/scCallingSC_test.go @@ -27,8 +27,6 @@ func TestSCCallingInCrossShard(t *testing.T) { t.Skip("this is not a short test") } - _ = logger.SetLogLevel("*:INFO,*:DEBUG") - numOfShards := 2 
nodesPerShard := 3 numMetachainNodes := 3 @@ -123,17 +121,27 @@ func TestSCCallingInCrossShardDelegation(t *testing.T) { numOfShards := 2 nodesPerShard := 3 numMetachainNodes := 3 + shardConsensusGroupSize := 2 + metaConsensusGroupSize := 2 advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") _ = advertiser.Bootstrap() - nodes := integrationTests.CreateNodes( - numOfShards, + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( nodesPerShard, numMetachainNodes, + numOfShards, + shardConsensusGroupSize, + metaConsensusGroupSize, integrationTests.GetConnectableAddress(advertiser), ) + nodes := make([]*integrationTests.TestProcessorNode, 0) + + for _, nds := range nodesMap { + nodes = append(nodes, nds...) + } + idxProposers := make([]int, numOfShards+1) for i := 0; i < numOfShards; i++ { idxProposers[i] = i * nodesPerShard @@ -227,7 +235,7 @@ func putDeploySCToDataPool( SndAddr: pubkey, GasPrice: nodes[0].EconomicsData.GetMinGasPrice(), GasLimit: nodes[0].EconomicsData.MaxGasLimitPerBlock() - 1, - Data: scCodeString + "@" + hex.EncodeToString(factory.ArwenVirtualMachine), + Data: []byte(scCodeString + "@" + hex.EncodeToString(factory.ArwenVirtualMachine)), } txHash, _ := core.CalculateHash(integrationTests.TestMarshalizer, integrationTests.TestHasher, tx) diff --git a/integrationTests/multiShard/transaction/sendBulkTransactions_test.go b/integrationTests/multiShard/transaction/sendBulkTransactions_test.go index c161c6c62a2..54cc27a9913 100644 --- a/integrationTests/multiShard/transaction/sendBulkTransactions_test.go +++ b/integrationTests/multiShard/transaction/sendBulkTransactions_test.go @@ -177,9 +177,8 @@ func generateTx(sender crypto.PrivateKey, receiver crypto.PublicKey) *transactio SndAddr: senderBytes, GasPrice: integrationTests.MinTxGasPrice, GasLimit: integrationTests.MinTxGasLimit, - Data: "", + Data: []byte(""), Signature: nil, - Challenge: nil, } marshalizedTxBeforeSigning, _ := json.Marshal(tx) signer := singlesig.SchnorrSigner{} diff --git a/integrationTests/resolvers/headers_test.go b/integrationTests/resolvers/headers_test.go new file mode 100644 index 00000000000..789d115c660 --- /dev/null +++ b/integrationTests/resolvers/headers_test.go @@ -0,0 +1,221 @@ +package resolvers + +import ( + "bytes" + "testing" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" +) + +//------- Request resolve by hash + +func TestRequestResolveShardHeadersByHashRequestingShardResolvingShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + rm := newReceiverMonitor(t) + shardId := uint32(0) + nResolver, nRequester := createResolverRequester(shardId, shardId) + headerNonce := uint64(0) + header, hash := createShardHeader(headerNonce, integrationTests.IntegrationTestsChainID) + + //add header with nonce 0 in pool + nResolver.ShardDataPool.Headers().AddHeader(hash, header) + + //setup header received event + nRequester.ShardDataPool.Headers().RegisterHandler( + func(header data.HeaderHandler, key []byte) { + if bytes.Equal(key, hash) { + log.Info("received header", "hash", key) + rm.done() + } + }, + ) + + //request by hash should work + resolver, err := nRequester.ResolverFinder.CrossShardResolver(factory.ShardBlocksTopic, sharding.MetachainShardId) + log.LogIfError(err) + err = 
resolver.RequestDataFromHash(hash) + log.LogIfError(err) + + rm.waitWithTimeout() +} + +func TestRequestResolveShardHeadersByHashRequestingMetaResolvingShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + rm := newReceiverMonitor(t) + shardId := uint32(0) + nResolver, nRequester := createResolverRequester(shardId, sharding.MetachainShardId) + headerNonce := uint64(0) + header, hash := createShardHeader(headerNonce, integrationTests.IntegrationTestsChainID) + + //add header with nonce 0 in pool + nResolver.ShardDataPool.Headers().AddHeader(hash, header) + + //setup header received event + nRequester.MetaDataPool.Headers().RegisterHandler( + func(header data.HeaderHandler, key []byte) { + if bytes.Equal(key, hash) { + log.Info("received header", "hash", key) + rm.done() + } + }, + ) + + //request by hash should work + resolver, err := nRequester.ResolverFinder.CrossShardResolver(factory.ShardBlocksTopic, shardId) + log.LogIfError(err) + err = resolver.RequestDataFromHash(hash) + log.LogIfError(err) + + rm.waitWithTimeout() +} + +func TestRequestResolveShardHeadersByHashRequestingShardResolvingMeta(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + rm := newReceiverMonitor(t) + shardId := uint32(0) + nResolver, nRequester := createResolverRequester(sharding.MetachainShardId, shardId) + headerNonce := uint64(0) + header, hash := createShardHeader(headerNonce, integrationTests.IntegrationTestsChainID) + + //add header with nonce 0 in pool + nResolver.MetaDataPool.Headers().AddHeader(hash, header) + + //setup header received event + nRequester.ShardDataPool.Headers().RegisterHandler( + func(header data.HeaderHandler, key []byte) { + if bytes.Equal(key, hash) { + log.Info("received header", "hash", key) + rm.done() + } + }, + ) + + //request by hash should work + resolver, err := nRequester.ResolverFinder.CrossShardResolver(factory.ShardBlocksTopic, sharding.MetachainShardId) + log.LogIfError(err) + err = resolver.RequestDataFromHash(hash) + log.LogIfError(err) + + rm.waitWithTimeout() +} + +//------- Request resolve by nonce + +func TestRequestResolveShardHeadersByNonceRequestingShardResolvingShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + rm := newReceiverMonitor(t) + shardId := uint32(0) + nResolver, nRequester := createResolverRequester(shardId, shardId) + headerNonce := uint64(0) + header, hash := createShardHeader(headerNonce, integrationTests.IntegrationTestsChainID) + + //add header with nonce 0 in pool + nResolver.ShardDataPool.Headers().AddHeader(hash, header) + + //setup header received event + nRequester.ShardDataPool.Headers().RegisterHandler( + func(header data.HeaderHandler, key []byte) { + if bytes.Equal(key, hash) { + log.Info("received header", "hash", key) + rm.done() + } + }, + ) + + //request by hash should work + resolver, err := nRequester.ResolverFinder.CrossShardResolver(factory.ShardBlocksTopic, sharding.MetachainShardId) + log.LogIfError(err) + headerResolver, ok := resolver.(dataRetriever.HeaderResolver) + assert.True(t, ok) + err = headerResolver.RequestDataFromNonce(headerNonce) + log.LogIfError(err) + + rm.waitWithTimeout() +} + +func TestRequestResolveShardHeadersByNonceRequestingMetaResolvingShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + rm := newReceiverMonitor(t) + shardId := uint32(0) + nResolver, nRequester := createResolverRequester(shardId, sharding.MetachainShardId) + headerNonce := uint64(0) + header, 
hash := createShardHeader(headerNonce, integrationTests.IntegrationTestsChainID) + + //add header with nonce 0 in pool + nResolver.ShardDataPool.Headers().AddHeader(hash, header) + + //setup header received event + nRequester.MetaDataPool.Headers().RegisterHandler( + func(header data.HeaderHandler, key []byte) { + if bytes.Equal(key, hash) { + log.Info("received header", "hash", key) + rm.done() + } + }, + ) + + //request by hash should work + resolver, err := nRequester.ResolverFinder.CrossShardResolver(factory.ShardBlocksTopic, shardId) + log.LogIfError(err) + headerResolver, ok := resolver.(dataRetriever.HeaderResolver) + assert.True(t, ok) + err = headerResolver.RequestDataFromNonce(headerNonce) + log.LogIfError(err) + + rm.waitWithTimeout() +} + +func TestRequestResolveShardHeadersByNonceRequestingShardResolvingMeta(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + rm := newReceiverMonitor(t) + shardId := uint32(0) + nResolver, nRequester := createResolverRequester(sharding.MetachainShardId, shardId) + headerNonce := uint64(0) + header, hash := createShardHeader(headerNonce, integrationTests.IntegrationTestsChainID) + + //add header with nonce 0 in pool + nResolver.MetaDataPool.Headers().AddHeader(hash, header) + + //setup header received event + nRequester.ShardDataPool.Headers().RegisterHandler( + func(header data.HeaderHandler, key []byte) { + if bytes.Equal(key, hash) { + log.Info("received header", "hash", key) + rm.done() + } + }, + ) + + //request by hash should work + resolver, err := nRequester.ResolverFinder.CrossShardResolver(factory.ShardBlocksTopic, sharding.MetachainShardId) + log.LogIfError(err) + headerResolver, ok := resolver.(dataRetriever.HeaderResolver) + assert.True(t, ok) + err = headerResolver.RequestDataFromNonce(headerNonce) + log.LogIfError(err) + + rm.waitWithTimeout() +} diff --git a/integrationTests/resolvers/metablock_test.go b/integrationTests/resolvers/metablock_test.go new file mode 100644 index 00000000000..a697ad4e05c --- /dev/null +++ b/integrationTests/resolvers/metablock_test.go @@ -0,0 +1,221 @@ +package resolvers + +import ( + "bytes" + "testing" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" +) + +//------- Request resolve by hash + +func TestRequestResolveMetaHeadersByHashRequestingShardResolvingShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + rm := newReceiverMonitor(t) + shardId := uint32(0) + nResolver, nRequester := createResolverRequester(shardId, shardId) + headerNonce := uint64(0) + header, hash := createMetaHeader(headerNonce, integrationTests.IntegrationTestsChainID) + + //add header with nonce 0 in pool + nResolver.ShardDataPool.Headers().AddHeader(hash, header) + + //setup header received event + nRequester.ShardDataPool.Headers().RegisterHandler( + func(header data.HeaderHandler, key []byte) { + if bytes.Equal(key, hash) { + log.Info("received meta header", "hash", key) + rm.done() + } + }, + ) + + //request by hash should work + resolver, err := nRequester.ResolverFinder.MetaChainResolver(factory.MetachainBlocksTopic) + log.LogIfError(err) + err = resolver.RequestDataFromHash(hash) + log.LogIfError(err) + + rm.waitWithTimeout() +} + +func TestRequestResolveMetaHeadersByHashRequestingMetaResolvingShard(t 
*testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + rm := newReceiverMonitor(t) + shardId := uint32(0) + nResolver, nRequester := createResolverRequester(shardId, sharding.MetachainShardId) + headerNonce := uint64(0) + header, hash := createMetaHeader(headerNonce, integrationTests.IntegrationTestsChainID) + + //add header with nonce 0 in pool + nResolver.ShardDataPool.Headers().AddHeader(hash, header) + + //setup header received event + nRequester.MetaDataPool.Headers().RegisterHandler( + func(header data.HeaderHandler, key []byte) { + if bytes.Equal(key, hash) { + log.Info("received meta header", "hash", key) + rm.done() + } + }, + ) + + //request by hash should work + resolver, err := nRequester.ResolverFinder.MetaChainResolver(factory.MetachainBlocksTopic) + log.LogIfError(err) + err = resolver.RequestDataFromHash(hash) + log.LogIfError(err) + + rm.waitWithTimeout() +} + +func TestRequestResolveMetaHeadersByHashRequestingShardResolvingMeta(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + rm := newReceiverMonitor(t) + shardId := uint32(0) + nResolver, nRequester := createResolverRequester(sharding.MetachainShardId, shardId) + headerNonce := uint64(0) + header, hash := createMetaHeader(headerNonce, integrationTests.IntegrationTestsChainID) + + //add header with nonce 0 in pool + nResolver.MetaDataPool.Headers().AddHeader(hash, header) + + //setup header received event + nRequester.ShardDataPool.Headers().RegisterHandler( + func(header data.HeaderHandler, key []byte) { + if bytes.Equal(key, hash) { + log.Info("received meta header", "hash", key) + rm.done() + } + }, + ) + + //request by hash should work + resolver, err := nRequester.ResolverFinder.MetaChainResolver(factory.MetachainBlocksTopic) + log.LogIfError(err) + err = resolver.RequestDataFromHash(hash) + log.LogIfError(err) + + rm.waitWithTimeout() +} + +//------- Request resolve by nonce + +func TestRequestResolveMetaHeadersByNonceRequestingShardResolvingShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + rm := newReceiverMonitor(t) + shardId := uint32(0) + nResolver, nRequester := createResolverRequester(shardId, shardId) + headerNonce := uint64(0) + header, hash := createMetaHeader(headerNonce, integrationTests.IntegrationTestsChainID) + + //add header with nonce 0 in pool + nResolver.ShardDataPool.Headers().AddHeader(hash, header) + + //setup header received event + nRequester.ShardDataPool.Headers().RegisterHandler( + func(header data.HeaderHandler, key []byte) { + if bytes.Equal(key, hash) { + log.Info("received header", "hash", key) + rm.done() + } + }, + ) + + //request by hash should work + resolver, err := nRequester.ResolverFinder.MetaChainResolver(factory.MetachainBlocksTopic) + log.LogIfError(err) + headerResolver, ok := resolver.(dataRetriever.HeaderResolver) + assert.True(t, ok) + err = headerResolver.RequestDataFromNonce(headerNonce) + log.LogIfError(err) + + rm.waitWithTimeout() +} + +func TestRequestResolveMetaHeadersByNonceRequestingMetaResolvingShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + rm := newReceiverMonitor(t) + shardId := uint32(0) + nResolver, nRequester := createResolverRequester(shardId, sharding.MetachainShardId) + headerNonce := uint64(0) + header, hash := createMetaHeader(headerNonce, integrationTests.IntegrationTestsChainID) + + //add header with nonce 0 in pool + nResolver.ShardDataPool.Headers().AddHeader(hash, header) + + //setup header received event 
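+ // note (editorial, assumed from the surrounding test code): the callback registered below is
+ // invoked for headers as they are added to this pool, so the key is compared against the
+ // expected hash before signalling the receiver monitor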
+ nRequester.MetaDataPool.Headers().RegisterHandler( + func(header data.HeaderHandler, key []byte) { + if bytes.Equal(key, hash) { + log.Info("received header", "hash", key) + rm.done() + } + }, + ) + + //request by hash should work + resolver, err := nRequester.ResolverFinder.MetaChainResolver(factory.MetachainBlocksTopic) + log.LogIfError(err) + headerResolver, ok := resolver.(dataRetriever.HeaderResolver) + assert.True(t, ok) + err = headerResolver.RequestDataFromNonce(headerNonce) + log.LogIfError(err) + + rm.waitWithTimeout() +} + +func TestRequestResolveMetaHeadersByNonceRequestingShardResolvingMeta(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + rm := newReceiverMonitor(t) + shardId := uint32(0) + nResolver, nRequester := createResolverRequester(sharding.MetachainShardId, shardId) + headerNonce := uint64(0) + header, hash := createMetaHeader(headerNonce, integrationTests.IntegrationTestsChainID) + + //add header with nonce 0 in pool + nResolver.MetaDataPool.Headers().AddHeader(hash, header) + + //setup header received event + nRequester.ShardDataPool.Headers().RegisterHandler( + func(header data.HeaderHandler, key []byte) { + if bytes.Equal(key, hash) { + log.Info("received header", "hash", key) + rm.done() + } + }, + ) + + //request by hash should work + resolver, err := nRequester.ResolverFinder.MetaChainResolver(factory.MetachainBlocksTopic) + log.LogIfError(err) + headerResolver, ok := resolver.(dataRetriever.HeaderResolver) + assert.True(t, ok) + err = headerResolver.RequestDataFromNonce(headerNonce) + log.LogIfError(err) + + rm.waitWithTimeout() +} diff --git a/integrationTests/resolvers/miniblocks_test.go b/integrationTests/resolvers/miniblocks_test.go new file mode 100644 index 00000000000..82b92cb86bb --- /dev/null +++ b/integrationTests/resolvers/miniblocks_test.go @@ -0,0 +1,138 @@ +package resolvers + +import ( + "bytes" + "testing" + + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +func TestRequestResolveMiniblockByHashRequestingShardResolvingSameShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + rm := newReceiverMonitor(t) + shardId := uint32(0) + nResolver, nRequester := createResolverRequester(shardId, shardId) + miniblock, hash := createMiniblock(shardId, shardId) + + //add miniblock in pool + _, _ = nResolver.ShardDataPool.MiniBlocks().HasOrAdd(hash, miniblock) + + //setup header received event + nRequester.ShardDataPool.MiniBlocks().RegisterHandler( + func(key []byte) { + if bytes.Equal(key, hash) { + log.Info("received miniblock", "hash", key) + rm.done() + } + }, + ) + + //request by hash should work + resolver, err := nRequester.ResolverFinder.IntraShardResolver(factory.MiniBlocksTopic) + log.LogIfError(err) + err = resolver.RequestDataFromHash(hash) + log.LogIfError(err) + + rm.waitWithTimeout() +} + +func TestRequestResolveMiniblockByHashRequestingShardResolvingOtherShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + rm := newReceiverMonitor(t) + shardIdResolver := uint32(0) + shardIdRequester := uint32(1) + nResolver, nRequester := createResolverRequester(shardIdResolver, shardIdRequester) + miniblock, hash := createMiniblock(shardIdResolver, shardIdRequester) + + //add miniblock in pool + _, _ = nResolver.ShardDataPool.MiniBlocks().HasOrAdd(hash, miniblock) + + //setup header received event + nRequester.ShardDataPool.MiniBlocks().RegisterHandler( + func(key []byte) { + if 
bytes.Equal(key, hash) { + log.Info("received miniblock", "hash", key) + rm.done() + } + }, + ) + + //request by hash should work + resolver, err := nRequester.ResolverFinder.CrossShardResolver(factory.MiniBlocksTopic, shardIdResolver) + log.LogIfError(err) + err = resolver.RequestDataFromHash(hash) + log.LogIfError(err) + + rm.waitWithTimeout() +} + +func TestRequestResolveMiniblockByHashRequestingShardResolvingMeta(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + rm := newReceiverMonitor(t) + shardId := uint32(0) + nResolver, nRequester := createResolverRequester(sharding.MetachainShardId, shardId) + miniblock, hash := createMiniblock(shardId, shardId) + + //add miniblock in pool + _, _ = nResolver.MetaDataPool.MiniBlocks().HasOrAdd(hash, miniblock) + + //setup header received event + nRequester.ShardDataPool.MiniBlocks().RegisterHandler( + func(key []byte) { + if bytes.Equal(key, hash) { + log.Info("received miniblock", "hash", key) + rm.done() + } + }, + ) + + //request by hash should work + resolver, err := nRequester.ResolverFinder.CrossShardResolver(factory.MiniBlocksTopic, sharding.MetachainShardId) + log.LogIfError(err) + err = resolver.RequestDataFromHash(hash) + log.LogIfError(err) + + rm.waitWithTimeout() +} + +func TestRequestResolveMiniblockByHashRequestingMetaResolvingShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + rm := newReceiverMonitor(t) + shardId := uint32(0) + nResolver, nRequester := createResolverRequester(shardId, sharding.MetachainShardId) + miniblock, hash := createMiniblock(shardId, sharding.MetachainShardId) + + //add miniblock in pool + _, _ = nResolver.ShardDataPool.MiniBlocks().HasOrAdd(hash, miniblock) + + //setup header received event + nRequester.MetaDataPool.MiniBlocks().RegisterHandler( + func(key []byte) { + if bytes.Equal(key, hash) { + log.Info("received miniblock", "hash", key) + rm.done() + } + }, + ) + + //request by hash should work + resolver, err := nRequester.ResolverFinder.CrossShardResolver(factory.MiniBlocksTopic, shardId) + log.LogIfError(err) + err = resolver.RequestDataFromHash(hash) + log.LogIfError(err) + + rm.waitWithTimeout() +} diff --git a/integrationTests/resolvers/receiverMonitor.go b/integrationTests/resolvers/receiverMonitor.go new file mode 100644 index 00000000000..bc7b8710115 --- /dev/null +++ b/integrationTests/resolvers/receiverMonitor.go @@ -0,0 +1,40 @@ +package resolvers + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +var defaultReceiverMonitorTimeout = time.Second * 2 + +type receiverMonitor struct { + timeout time.Duration + tb testing.TB + chanDone chan struct{} +} + +func newReceiverMonitor(tb testing.TB) *receiverMonitor { + return &receiverMonitor{ + timeout: defaultReceiverMonitorTimeout, + tb: tb, + chanDone: make(chan struct{}, 1), + } +} + +func (rm *receiverMonitor) done() { + select { + case rm.chanDone <- struct{}{}: + default: + } +} + +func (rm *receiverMonitor) waitWithTimeout() { + select { + case <-rm.chanDone: + return + case <-time.After(rm.timeout): + assert.Fail(rm.tb, "timout waiting for data") + } +} diff --git a/integrationTests/resolvers/rewards_test.go b/integrationTests/resolvers/rewards_test.go new file mode 100644 index 00000000000..aa56b585d87 --- /dev/null +++ b/integrationTests/resolvers/rewards_test.go @@ -0,0 +1,75 @@ +package resolvers + +import ( + "bytes" + "testing" + + "github.com/ElrondNetwork/elrond-go/process/factory" +) + +func 
TestRequestResolveRewardsByHashRequestingShardResolvingSameShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + rm := newReceiverMonitor(t) + shardId := uint32(0) + nResolver, nRequester := createResolverRequester(shardId, shardId) + headerNonce := uint64(0) + reward, hash := createReward(headerNonce, shardId) + + //add reward with round 0 in pool + nResolver.ShardDataPool.RewardTransactions().AddData(hash, reward, "cache") + + //setup header received event + nRequester.ShardDataPool.RewardTransactions().RegisterHandler( + func(key []byte) { + if bytes.Equal(key, hash) { + log.Info("received reward tx", "hash", key) + rm.done() + } + }, + ) + + //request by hash should work + resolver, err := nRequester.ResolverFinder.IntraShardResolver(factory.RewardsTransactionTopic) + log.LogIfError(err) + err = resolver.RequestDataFromHash(hash) + log.LogIfError(err) + + rm.waitWithTimeout() +} + +func TestRequestResolveRewardsByHashRequestingShardResolvingOtherShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + rm := newReceiverMonitor(t) + shardIdResolver := uint32(0) + shardIdRequester := uint32(1) + nResolver, nRequester := createResolverRequester(shardIdResolver, shardIdRequester) + headerNonce := uint64(0) + reward, hash := createReward(headerNonce, shardIdRequester) + + //add reward with round 0 in pool + nResolver.ShardDataPool.RewardTransactions().AddData(hash, reward, "cache") + + //setup header received event + nRequester.ShardDataPool.RewardTransactions().RegisterHandler( + func(key []byte) { + if bytes.Equal(key, hash) { + log.Info("received reward tx", "hash", key) + rm.done() + } + }, + ) + + //request by hash should work + resolver, err := nRequester.ResolverFinder.CrossShardResolver(factory.RewardsTransactionTopic, shardIdResolver) + log.LogIfError(err) + err = resolver.RequestDataFromHash(hash) + log.LogIfError(err) + + rm.waitWithTimeout() +} diff --git a/integrationTests/resolvers/testInitializer.go b/integrationTests/resolvers/testInitializer.go new file mode 100644 index 00000000000..c44c365db56 --- /dev/null +++ b/integrationTests/resolvers/testInitializer.go @@ -0,0 +1,115 @@ +package resolvers + +import ( + "math/big" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/logger" +) + +var log = logger.GetOrCreate("integrationtests/multishard/resolvers") + +func createResolverRequester( + resolverShardID uint32, + requesterShardID uint32, +) (*integrationTests.TestProcessorNode, *integrationTests.TestProcessorNode) { + + numShards := uint32(2) + + advertiserAddress := "" + txSignShardId := uint32(0) + nResolver := integrationTests.NewTestProcessorNode(numShards, resolverShardID, txSignShardId, advertiserAddress) + nRequester := integrationTests.NewTestProcessorNode(numShards, requesterShardID, txSignShardId, advertiserAddress) + + _ = nRequester.Node.Start() + _ = nResolver.Node.Start() + + time.Sleep(time.Second) + err := nRequester.Messenger.ConnectToPeer(integrationTests.GetConnectableAddress(nResolver.Messenger)) + log.LogIfError(err) + + time.Sleep(time.Second) + + return nResolver, nRequester +} + +func createShardHeader(nonce uint64, chainID []byte) (data.HeaderHandler, []byte) { + hdr := &block.Header{ + Nonce: nonce, + PubKeysBitmap: []byte{255, 0}, + 
Signature: []byte("signature"), + PrevHash: []byte("prev hash"), + TimeStamp: uint64(time.Now().Unix()), + Round: 1, + Epoch: 2, + ShardId: 0, + BlockBodyType: block.TxBlock, + RootHash: []byte{255, 255}, + PrevRandSeed: make([]byte, 0), + RandSeed: make([]byte, 0), + MiniBlockHeaders: make([]block.MiniBlockHeader, 0), + ChainID: chainID, + } + + hash, err := core.CalculateHash(integrationTests.TestMarshalizer, integrationTests.TestHasher, hdr) + log.LogIfError(err) + + return hdr, hash +} + +func createMetaHeader(nonce uint64, chainID []byte) (data.HeaderHandler, []byte) { + hdr := &block.MetaBlock{ + Nonce: nonce, + Epoch: 0, + ShardInfo: make([]block.ShardData, 0), + PeerInfo: make([]block.PeerData, 0), + Signature: []byte("signature"), + PubKeysBitmap: []byte{1}, + PrevHash: []byte("prev hash"), + PrevRandSeed: []byte("prev rand seed"), + RandSeed: []byte("rand seed"), + RootHash: []byte("root hash"), + TxCount: 0, + ChainID: chainID, + } + + hash, err := core.CalculateHash(integrationTests.TestMarshalizer, integrationTests.TestHasher, hdr) + log.LogIfError(err) + + return hdr, hash +} + +func createMiniblock(senderShardId uint32, receiverSharId uint32) (*block.MiniBlock, []byte) { + dummyTxHash := make([]byte, integrationTests.TestHasher.Size()) + miniblock := &block.MiniBlock{ + TxHashes: [][]byte{dummyTxHash}, + ReceiverShardID: receiverSharId, + SenderShardID: senderShardId, + Type: 0, + } + + hash, err := core.CalculateHash(integrationTests.TestMarshalizer, integrationTests.TestHasher, miniblock) + log.LogIfError(err) + + return miniblock, hash +} + +func createReward(round uint64, shardId uint32) (data.TransactionHandler, []byte) { + reward := &rewardTx.RewardTx{ + Round: round, + Epoch: 0, + Value: big.NewInt(1), + RcvAddr: make([]byte, integrationTests.TestHasher.Size()), + ShardId: shardId, + } + + hash, err := core.CalculateHash(integrationTests.TestMarshalizer, integrationTests.TestHasher, reward) + log.LogIfError(err) + + return reward, hash +} diff --git a/integrationTests/singleShard/block/consensusNotAchieved_test.go b/integrationTests/singleShard/block/consensusNotAchieved_test.go new file mode 100644 index 00000000000..e47b8c47fd9 --- /dev/null +++ b/integrationTests/singleShard/block/consensusNotAchieved_test.go @@ -0,0 +1,171 @@ +package block + +import ( + "context" + "encoding/json" + "fmt" + "math/big" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/integrationTests/mock" + "github.com/stretchr/testify/assert" +) + +func TestConsensus_BlockWithoutTwoThirdsPlusOneSignaturesOrWrongBitmapShouldNotBeAccepted(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + maxShards := 1 + consensusGroupSize := 2 + nodesPerShard := 5 + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + singleSigner := &mock.SignerMock{ + VerifyStub: func(public crypto.PublicKey, msg []byte, sig []byte) error { + return nil + }, + SignStub: func(private crypto.PrivateKey, msg []byte) ([]byte, error) { + return nil, nil + }, + } + keyGen := &mock.KeyGenMock{} + + // create map of shards - testNodeProcessors for metachain and shard chain + nodesMap := integrationTests.CreateNodesWithNodesCoordinatorAndHeaderSigVerifier( + nodesPerShard, + nodesPerShard, + maxShards, + consensusGroupSize, + 
consensusGroupSize, + integrationTests.GetConnectableAddress(advertiser), + singleSigner, + keyGen, + ) + + for _, nodes := range nodesMap { + integrationTests.DisplayAndStartNodes(nodes) + integrationTests.SetEconomicsParameters(nodes, integrationTests.MaxGasLimitPerBlock, integrationTests.MinTxGasPrice, integrationTests.MinTxGasLimit) + //set rewards = 0 so we can easily test the balances taking into account only the tx fee + for _, n := range nodes { + n.EconomicsData.SetRewards(big.NewInt(0)) + } + } + + defer func() { + _ = advertiser.Close() + for _, nodes := range nodesMap { + for _, n := range nodes { + _ = n.Node.Stop() + } + } + }() + + fmt.Println("Delaying for nodes p2p bootstrap...") + time.Sleep(stepDelay) + + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + bitMapNotEnough := []byte{1} + body, hdr, _ := proposeBlock(nodesMap[0][0], round, nonce, bitMapNotEnough) + assert.NotNil(t, body) + assert.NotNil(t, hdr) + + nodesMap[0][0].BroadcastBlock(body, hdr) + time.Sleep(stepDelay) + + // the block should have not pass the interceptor + assert.Equal(t, int32(0), nodesMap[0][1].CounterHdrRecv) + + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + bitMapTooBig := []byte{1, 0, 1, 0, 1} // only one byte was needed, so this block should not pass + body, hdr, _ = proposeBlock(nodesMap[0][0], round, nonce, bitMapTooBig) + assert.NotNil(t, body) + assert.NotNil(t, hdr) + + nodesMap[0][0].BroadcastBlock(body, hdr) + time.Sleep(stepDelay) + + // this block should have not passed the interceptor + assert.Equal(t, int32(0), nodesMap[0][1].CounterHdrRecv) + + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + bitMapEnough := []byte{11} // 11 = 0b0000 1011 so 3 signatures + body, hdr, _ = proposeBlock(nodesMap[0][0], round, nonce, bitMapEnough) + assert.NotNil(t, body) + assert.NotNil(t, hdr) + + nodesMap[0][0].BroadcastBlock(body, hdr) + time.Sleep(stepDelay) + + // this block should have passed the interceptor + assert.Equal(t, int32(1), nodesMap[0][1].CounterHdrRecv) +} + +func proposeBlock(node *integrationTests.TestProcessorNode, round uint64, nonce uint64, bitmap []byte) (data.BodyHandler, data.HeaderHandler, [][]byte) { + startTime := time.Now() + maxTime := time.Second * 2 + + haveTime := func() bool { + elapsedTime := time.Since(startTime) + remainingTime := maxTime - elapsedTime + return remainingTime > 0 + } + + blockHeader := &block.Header{} + + blockHeader.SetShardID(0) + blockHeader.SetRound(round) + blockHeader.SetNonce(nonce) + blockHeader.SetPubKeysBitmap(bitmap) + currHdr := node.BlockChain.GetCurrentBlockHeader() + if currHdr == nil { + currHdr = node.BlockChain.GetGenesisHeader() + } + + buff, _ := json.Marshal(currHdr) + blockHeader.SetPrevHash(integrationTests.TestHasher.Compute(string(buff))) + blockHeader.SetPrevRandSeed(currHdr.GetRandSeed()) + blockHeader.SetSignature([]byte("aggregate signature")) + blockHeader.SetRandSeed([]byte("aggregate signature")) + blockHeader.SetLeaderSignature([]byte("leader sign")) + blockHeader.SetChainID(node.ChainID) + + blockBody, err := node.BlockProcessor.CreateBlockBody(blockHeader, haveTime) + if err != nil { + fmt.Println(err.Error()) + } + blockBody, err = node.BlockProcessor.ApplyBodyToHeader(blockHeader, blockBody) + if err != nil { + fmt.Println(err.Error()) + } + + shardBlockBody, ok := blockBody.(block.Body) + txHashes := make([][]byte, 0) + if !ok { + return blockBody, blockHeader, txHashes + } + + for _, mb := range 
shardBlockBody { + for _, hash := range mb.TxHashes { + copiedHash := make([]byte, len(hash)) + copy(copiedHash, hash) + txHashes = append(txHashes, copiedHash) + } + } + + return blockBody, blockHeader, txHashes +} diff --git a/integrationTests/singleShard/block/executingMiniblocksSc_test.go b/integrationTests/singleShard/block/executingMiniblocksSc_test.go index d66ae709383..aaa06a4c59e 100644 --- a/integrationTests/singleShard/block/executingMiniblocksSc_test.go +++ b/integrationTests/singleShard/block/executingMiniblocksSc_test.go @@ -9,12 +9,17 @@ import ( "testing" "time" + "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/core/statistics" + "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/logger" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/storage/mock" "github.com/stretchr/testify/assert" ) -var agarioFile = "../../agarioV3.hex" +var agarioFile = "../../agar_v1_min.hex" var stepDelay = time.Second func TestShouldProcessWithScTxsJoinAndRewardOneRound(t *testing.T) { @@ -77,9 +82,10 @@ func TestShouldProcessWithScTxsJoinAndRewardOneRound(t *testing.T) { integrationTests.MintAllNodes(nodes, initialVal) integrationTests.MintAllPlayers(nodes, players, initialVal) - integrationTests.DeployScTx(nodes, idxProposer, string(scCode)) + integrationTests.DeployScTx(nodes, idxProposer, string(scCode), factory.IELEVirtualMachine) time.Sleep(stepDelay) integrationTests.ProposeBlock(nodes, []int{idxProposer}, round, nonce) + time.Sleep(stepDelay) integrationTests.SyncBlock(t, nodes, []int{idxProposer}, round) round = integrationTests.IncrementAndPrintRound(round) nonce++ @@ -153,7 +159,6 @@ func runMultipleRoundsOfTheGame( // waiting to disseminate transactions time.Sleep(stepDelay) - round, nonce = integrationTests.ProposeAndSyncBlocks(t, nodes, idxProposers, round, nonce) integrationTests.CheckJoinGame(t, nodes, players, topUpValue, idxProposers[0], hardCodedScResultingAddress) @@ -164,12 +169,143 @@ func runMultipleRoundsOfTheGame( // waiting to disseminate transactions time.Sleep(stepDelay) - round, nonce = integrationTests.ProposeAndSyncBlocks(t, nodes, idxProposers, round, nonce) + time.Sleep(stepDelay) integrationTests.CheckRewardsDistribution(t, nodes, players, topUpValue, totalWithdrawValue, hardCodedScResultingAddress, idxProposers[0]) - fmt.Println(rMonitor.GenerateStatistics()) + fmt.Println(rMonitor.GenerateStatistics(&config.Config{AccountsTrieStorage: config.StorageConfig{DB: config.DBConfig{}}}, &mock.PathManagerStub{}, "")) + } +} + +func TestShouldProcessMultipleERC20ContractsInSingleShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + _ = logger.SetLogLevel("*:DEBUG") + + scCode, err := ioutil.ReadFile("./wrc20_arwen_01.wasm") + assert.Nil(t, err) + + maxShards := uint32(1) + numOfNodes := 2 + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + advertiserAddr := integrationTests.GetConnectableAddress(advertiser) + + nodes := make([]*integrationTests.TestProcessorNode, numOfNodes) + for i := 0; i < numOfNodes; i++ { + nodes[i] = integrationTests.NewTestProcessorNode( + maxShards, + 0, + 0, + advertiserAddr, + ) + } + + idxProposer := 0 + numPlayers := 10 + players := make([]*integrationTests.TestWalletAccount, numPlayers) + for i := 0; i < numPlayers; i++ { + players[i] = 
integrationTests.CreateTestWalletAccount(nodes[idxProposer].ShardCoordinator, 0) + } + + defer func() { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Messenger.Close() + } + }() + + for _, n := range nodes { + _ = n.Messenger.Bootstrap() } + + fmt.Println("Delaying for nodes p2p bootstrap...") + time.Sleep(stepDelay) + + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + hardCodedSk, _ := hex.DecodeString("5561d28b0d89fa425bbbf9e49a018b5d1e4a462c03d2efce60faf9ddece2af06") + hardCodedScResultingAddress, _ := hex.DecodeString("000000000000000005006c560111a94e434413c1cdaafbc3e1348947d1d5b3a1") + nodes[idxProposer].LoadTxSignSkBytes(hardCodedSk) + + initialVal := big.NewInt(100000000000) + integrationTests.MintAllNodes(nodes, initialVal) + integrationTests.MintAllPlayers(nodes, players, initialVal) + + integrationTests.DeployScTx(nodes, idxProposer, hex.EncodeToString(scCode), factory.ArwenVirtualMachine) + time.Sleep(stepDelay) + round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, []int{idxProposer}, round, nonce) + + playersDoTopUp(nodes[idxProposer], players, hardCodedScResultingAddress, big.NewInt(10000000)) + time.Sleep(stepDelay) + round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, []int{idxProposer}, round, nonce) + + for i := 0; i < 100; i++ { + playersDoTransfer(nodes[idxProposer], players, hardCodedScResultingAddress, big.NewInt(100)) + } + + for i := 0; i < 10; i++ { + time.Sleep(stepDelay) + round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, []int{idxProposer}, round, nonce) + } + integrationTests.CheckRootHashes(t, nodes, []int{idxProposer}) + + time.Sleep(1 * time.Second) +} + +func playersDoTopUp( + node *integrationTests.TestProcessorNode, + players []*integrationTests.TestWalletAccount, + scAddress []byte, + txValue *big.Int, +) { + for _, player := range players { + createAndSendTx(node, player, txValue, 20000, scAddress, []byte("topUp")) + } +} + +func playersDoTransfer( + node *integrationTests.TestProcessorNode, + players []*integrationTests.TestWalletAccount, + scAddress []byte, + txValue *big.Int, +) { + for _, playerToTransfer := range players { + for _, player := range players { + createAndSendTx(node, player, big.NewInt(0), 20000, scAddress, + []byte("transfer@"+hex.EncodeToString(playerToTransfer.Address.Bytes())+"@"+hex.EncodeToString(txValue.Bytes()))) + } + } +} + +func createAndSendTx( + node *integrationTests.TestProcessorNode, + player *integrationTests.TestWalletAccount, + txValue *big.Int, + gasLimit uint64, + rcvAddress []byte, + txData []byte, +) { + tx := &transaction.Transaction{ + Nonce: player.Nonce, + Value: txValue, + SndAddr: player.Address.Bytes(), + RcvAddr: rcvAddress, + Data: txData, + GasPrice: node.EconomicsData.GetMinGasPrice(), + GasLimit: gasLimit, + } + + txBuff, _ := integrationTests.TestMarshalizer.Marshal(tx) + tx.Signature, _ = player.SingleSigner.Sign(player.SkTxSign, txBuff) + + _, _ = node.SendTransaction(tx) + player.Nonce++ } diff --git a/integrationTests/singleShard/block/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr_test.go index 2a45511573c..34770dae977 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr_test.go @@ -11,10 +11,10 @@ import ( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" 
"github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) @@ -60,18 +60,15 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { hdrHash2 := hasher.Compute(string(hdrBuff2)) //resolver has the headers - _, _ = nResolver.ShardDataPool.Headers().HasOrAdd(hdrHash1, &hdr1) + nResolver.ShardDataPool.Headers().AddHeader(hdrHash1, hdr1) - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(0, hdrHash1) - nResolver.ShardDataPool.HeadersNonces().Merge(0, syncMap) _ = nResolver.Storage.GetStorer(dataRetriever.BlockHeaderUnit).Put(hdrHash2, hdrBuff2) _ = nResolver.Storage.GetStorer(dataRetriever.ShardHdrNonceHashDataUnit).Put(uint64Converter.ToByteSlice(1), hdrHash2) chanDone1, chanDone2 := wireUpHandler(nRequester, hdr1, hdr2) //request header from pool - res, err := nRequester.ResolverFinder.IntraShardResolver(factory.HeadersTopic) + res, err := nRequester.ResolverFinder.CrossShardResolver(factory.ShardBlocksTopic, sharding.MetachainShardId) assert.Nil(t, err) hdrResolver := res.(*resolvers.HeaderResolver) _ = hdrResolver.RequestDataFromNonce(0) @@ -123,18 +120,15 @@ func TestNode_InterceptedHeaderWithWrongChainIDShouldBeDiscarded(t *testing.T) { hdrHash2 := hasher.Compute(string(hdrBuff2)) //resolver has the headers - _, _ = nResolver.ShardDataPool.Headers().HasOrAdd(hdrHash1, &hdr1) + nResolver.ShardDataPool.Headers().AddHeader(hdrHash1, hdr1) - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(0, hdrHash1) - nResolver.ShardDataPool.HeadersNonces().Merge(0, syncMap) _ = nResolver.Storage.GetStorer(dataRetriever.BlockHeaderUnit).Put(hdrHash2, hdrBuff2) _ = nResolver.Storage.GetStorer(dataRetriever.ShardHdrNonceHashDataUnit).Put(uint64Converter.ToByteSlice(1), hdrHash2) chanDone1, chanDone2 := wireUpHandler(nRequester, hdr1, hdr2) //request header from pool - res, err := nRequester.ResolverFinder.IntraShardResolver(factory.HeadersTopic) + res, err := nRequester.ResolverFinder.CrossShardResolver(factory.ShardBlocksTopic, sharding.MetachainShardId) assert.Nil(t, err) hdrResolver := res.(*resolvers.HeaderResolver) _ = hdrResolver.RequestDataFromNonce(0) @@ -192,16 +186,15 @@ func wireUpHandler( //wire up a received handler chanDone1 := make(chan struct{}, 1) chanDone2 := make(chan struct{}, 1) - nRequester.ShardDataPool.Headers().RegisterHandler(func(key []byte) { - hdrStored, _ := nRequester.ShardDataPool.Headers().Peek(key) + nRequester.ShardDataPool.Headers().RegisterHandler(func(header data.HeaderHandler, key []byte) { fmt.Printf("Received hash %v\n", base64.StdEncoding.EncodeToString(key)) - if reflect.DeepEqual(hdrStored, hdr1) && hdr1.GetSignature() != nil { + if reflect.DeepEqual(header, hdr1) && hdr1.GetSignature() != nil { fmt.Printf("Received header with hash %v\n", base64.StdEncoding.EncodeToString(key)) chanDone1 <- struct{}{} } - if reflect.DeepEqual(hdrStored, hdr2) && hdr2.GetSignature() != nil { + if reflect.DeepEqual(header, hdr2) && hdr2.GetSignature() != nil { fmt.Printf("Received header with hash %v\n", base64.StdEncoding.EncodeToString(key)) chanDone2 <- struct{}{} } diff --git a/integrationTests/singleShard/block/wrc20_arwen_01.wasm b/integrationTests/singleShard/block/wrc20_arwen_01.wasm new file mode 100755 index 
00000000000..5790ea43f40 Binary files /dev/null and b/integrationTests/singleShard/block/wrc20_arwen_01.wasm differ diff --git a/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx_test.go b/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx_test.go index 658b1d84305..61f3f7e8f9e 100644 --- a/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx_test.go @@ -4,7 +4,6 @@ import ( "crypto/rand" "encoding/base64" "encoding/binary" - "encoding/hex" "fmt" "math/big" "sync" @@ -145,7 +144,7 @@ func generateAndSendBulkSmartContractResults( } buff := make([]byte, 8) binary.BigEndian.PutUint64(buff, nonce) - uTx.Data = hex.EncodeToString(buff) + uTx.Data = buff uTxBytes, _ := marshalizer.Marshal(uTx) unsigedTxs = append(unsigedTxs, uTxBytes) diff --git a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go index b988ad3d3d0..80b6f57277f 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go @@ -60,7 +60,7 @@ func TestNode_RequestInterceptTransactionWithMessenger(t *testing.T) { Value: big.NewInt(0), RcvAddr: integrationTests.TestHasher.Compute("receiver"), SndAddr: buffPk1, - Data: txData, + Data: []byte(txData), GasLimit: integrationTests.MinTxGasLimit + txDataCost, GasPrice: integrationTests.MinTxGasPrice, } diff --git a/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx_test.go index 316304c0484..50e4ead0467 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx_test.go @@ -52,7 +52,7 @@ func TestNode_RequestInterceptUnsignedTransactionWithMessenger(t *testing.T) { Value: big.NewInt(0), RcvAddr: integrationTests.TestHasher.Compute("receiver"), SndAddr: buffPk1, - Data: "tx notarized data", + Data: []byte("tx notarized data"), TxHash: []byte("tx hash"), } diff --git a/integrationTests/state/stateTrieSync_test.go b/integrationTests/state/stateTrieSync_test.go new file mode 100644 index 00000000000..1afbbcc747e --- /dev/null +++ b/integrationTests/state/stateTrieSync_test.go @@ -0,0 +1,60 @@ +package state + +import ( + "fmt" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/data/trie" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/process/factory" + "github.com/stretchr/testify/assert" +) + +func TestNode_RequestInterceptTrieNodesWithMessenger(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + var nrOfShards uint32 = 1 + var shardID uint32 = 0 + var txSignPrivKeyShardId uint32 = 0 + requesterNodeAddr := "0" + resolverNodeAddr := "1" + + fmt.Println("Requester: ") + nRequester := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, requesterNodeAddr) + + fmt.Println("Resolver:") + nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) + _ = nRequester.Node.Start() + _ = nResolver.Node.Start() + defer func() { + _ = nRequester.Node.Stop() + _ = nResolver.Node.Stop() + }() + + time.Sleep(time.Second) + err := 
nRequester.Messenger.ConnectToPeer(integrationTests.GetConnectableAddress(nResolver.Messenger)) + assert.Nil(t, err) + + time.Sleep(integrationTests.SyncDelay) + + _ = nResolver.StateTrie.Update([]byte("doe"), []byte("reindeer")) + _ = nResolver.StateTrie.Update([]byte("dog"), []byte("puppy")) + _ = nResolver.StateTrie.Update([]byte("dogglesworth"), []byte("cat")) + _ = nResolver.StateTrie.Commit() + rootHash, _ := nResolver.StateTrie.Root() + + nilRootHash, _ := nRequester.StateTrie.Root() + trieNodesResolver, _ := nRequester.ResolverFinder.IntraShardResolver(factory.TrieNodesTopic) + + waitTime := 5 * time.Second + trieSyncer, _ := trie.NewTrieSyncer(trieNodesResolver, nRequester.ShardDataPool.TrieNodes(), nRequester.StateTrie, waitTime) + err = trieSyncer.StartSyncing(rootHash) + assert.Nil(t, err) + + newRootHash, _ := nRequester.StateTrie.Root() + assert.NotEqual(t, nilRootHash, newRootHash) + assert.Equal(t, rootHash, newRootHash) +} diff --git a/integrationTests/state/stateTrie_test.go b/integrationTests/state/stateTrie_test.go index 308822f3622..c2ac16b833b 100644 --- a/integrationTests/state/stateTrie_test.go +++ b/integrationTests/state/stateTrie_test.go @@ -2,24 +2,32 @@ package state import ( "bytes" + "context" "encoding/base64" + "errors" "fmt" "math/big" "math/rand" "runtime" "sync" + "sync/atomic" "testing" "time" + "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/state/factory" transaction2 "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/data/trie" + "github.com/ElrondNetwork/elrond-go/data/trie/evictionWaitingList" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" + "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/memorydb" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" "github.com/stretchr/testify/assert" ) @@ -30,10 +38,14 @@ func TestAccountsDB_RetrieveDataWithSomeValuesShouldWork(t *testing.T) { //and then reloading the data trie based on the root hash generated before t.Parallel() + key1 := []byte("ABC") + val1 := []byte("123") + key2 := []byte("DEF") + val2 := []byte("456") _, account, adb := integrationTests.GenerateAddressJournalAccountAccountsDB() - account.DataTrieTracker().SaveKeyValue([]byte{65, 66, 67}, []byte{32, 33, 34}) - account.DataTrieTracker().SaveKeyValue([]byte{68, 69, 70}, []byte{35, 36, 37}) + account.DataTrieTracker().SaveKeyValue(key1, val1) + account.DataTrieTracker().SaveKeyValue(key2, val2) err := adb.SaveDataTrie(account) assert.Nil(t, err) @@ -45,13 +57,13 @@ func TestAccountsDB_RetrieveDataWithSomeValuesShouldWork(t *testing.T) { assert.Nil(t, err) //verify data - dataRecovered, err := recoveredAccount.DataTrieTracker().RetrieveValue([]byte{65, 66, 67}) + dataRecovered, err := recoveredAccount.DataTrieTracker().RetrieveValue(key1) assert.Nil(t, err) - assert.Equal(t, []byte{32, 33, 34}, dataRecovered) + assert.Equal(t, val1, dataRecovered) - dataRecovered, err = recoveredAccount.DataTrieTracker().RetrieveValue([]byte{68, 69, 70}) + dataRecovered, err = recoveredAccount.DataTrieTracker().RetrieveValue(key2) assert.Nil(t, err) - assert.Equal(t, []byte{35, 36, 37}, dataRecovered) + assert.Equal(t, val2, dataRecovered) } func 
TestAccountsDB_PutCodeWithSomeValuesShouldWork(t *testing.T) { @@ -155,12 +167,12 @@ func TestAccountsDB_GetExistingAccountConcurrentlyShouldWork(t *testing.T) { adb, _, _ := integrationTests.CreateAccountsDB(0) wg := sync.WaitGroup{} - wg.Add(2000) + wg.Add(100) addresses := make([]state.AddressContainer, 0) - //generating 2000 different addresses - for len(addresses) < 2000 { + //generating 100 different addresses + for len(addresses) < 100 { addr := integrationTests.CreateRandomAddress() found := false @@ -176,7 +188,7 @@ func TestAccountsDB_GetExistingAccountConcurrentlyShouldWork(t *testing.T) { } } - for i := 0; i < 1000; i++ { + for i := 0; i < 50; i++ { go func(idx int) { accnt, err := adb.GetExistingAccount(addresses[idx*2]) @@ -221,8 +233,8 @@ func TestAccountsDB_CommitTwoOkAccountsShouldWork(t *testing.T) { err = state2.(*state.Account).SetBalanceWithJournal(balance2) assert.Nil(t, err) - key := []byte{65, 66, 67} - val := []byte{32, 33, 34} + key := []byte("ABC") + val := []byte("123") state2.DataTrieTracker().SaveKeyValue(key, val) err = adb.SaveDataTrie(state2) @@ -260,7 +272,11 @@ func TestAccountsDB_CommitTwoOkAccountsShouldWork(t *testing.T) { func TestTrieDB_RecreateFromStorageShouldWork(t *testing.T) { hasher := integrationTests.TestHasher store := integrationTests.CreateMemUnit() - tr1, _ := trie.NewTrie(store, integrationTests.TestMarshalizer, hasher) + evictionWaitListSize := uint(100) + ewl, _ := evictionWaitingList.NewEvictionWaitingList(evictionWaitListSize, memorydb.New(), integrationTests.TestMarshalizer) + trieStorage, _ := trie.NewTrieStorageManager(store, &config.DBConfig{}, ewl) + + tr1, _ := trie.NewTrie(trieStorage, integrationTests.TestMarshalizer, hasher) key := hasher.Compute("key") value := hasher.Compute("value") @@ -301,8 +317,8 @@ func TestAccountsDB_CommitTwoOkAccountsWithRecreationFromStorageShouldWork(t *te err = state2.(*state.Account).SetBalanceWithJournal(balance2) assert.Nil(t, err) - key := []byte{65, 66, 67} - val := []byte{32, 33, 34} + key := []byte("ABC") + val := []byte("123") state2.DataTrieTracker().SaveKeyValue(key, val) err = adb.SaveDataTrie(state2) @@ -316,7 +332,9 @@ func TestAccountsDB_CommitTwoOkAccountsWithRecreationFromStorageShouldWork(t *te assert.Nil(t, err) fmt.Printf("Data committed! Root: %v\n", base64.StdEncoding.EncodeToString(rootHash)) - tr, _ := trie.NewTrie(mu, integrationTests.TestMarshalizer, integrationTests.TestHasher) + ewl, _ := evictionWaitingList.NewEvictionWaitingList(100, memorydb.New(), integrationTests.TestMarshalizer) + trieStorage, _ := trie.NewTrieStorageManager(mu, &config.DBConfig{}, ewl) + tr, _ := trie.NewTrie(trieStorage, integrationTests.TestMarshalizer, integrationTests.TestHasher) adb, _ = state.NewAccountsDB(tr, integrationTests.TestHasher, integrationTests.TestMarshalizer, factory.NewAccountCreator()) //reloading a new trie to test if data is inside @@ -557,6 +575,7 @@ func TestAccountsDB_RevertCodeStepByStepAccountDataShouldWork(t *testing.T) { //adr1 puts code hash + code inside trie. adr2 has the same code hash //revert should work + code := []byte("ABC") adr1 := integrationTests.CreateRandomAddress() adr2 := integrationTests.CreateRandomAddress() @@ -570,7 +589,7 @@ func TestAccountsDB_RevertCodeStepByStepAccountDataShouldWork(t *testing.T) { //Step 2. 
create 2 new accounts state1, err := adb.GetAccountWithJournal(adr1) assert.Nil(t, err) - err = adb.PutCode(state1, []byte{65, 66, 67}) + err = adb.PutCode(state1, code) assert.Nil(t, err) snapshotCreated1 := adb.JournalLen() rootHash, err = adb.RootHash() @@ -581,7 +600,7 @@ func TestAccountsDB_RevertCodeStepByStepAccountDataShouldWork(t *testing.T) { state2, err := adb.GetAccountWithJournal(adr2) assert.Nil(t, err) - err = adb.PutCode(state2, []byte{65, 66, 67}) + err = adb.PutCode(state2, code) assert.Nil(t, err) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() @@ -623,6 +642,8 @@ func TestAccountsDB_RevertDataStepByStepAccountDataShouldWork(t *testing.T) { //adr1 puts data inside trie. adr2 puts the same data //revert should work + key := []byte("ABC") + val := []byte("123") adr1 := integrationTests.CreateRandomAddress() adr2 := integrationTests.CreateRandomAddress() @@ -636,7 +657,7 @@ func TestAccountsDB_RevertDataStepByStepAccountDataShouldWork(t *testing.T) { //Step 2. create 2 new accounts state1, err := adb.GetAccountWithJournal(adr1) assert.Nil(t, err) - state1.DataTrieTracker().SaveKeyValue([]byte{65, 66, 67}, []byte{32, 33, 34}) + state1.DataTrieTracker().SaveKeyValue(key, val) err = adb.SaveDataTrie(state1) assert.Nil(t, err) snapshotCreated1 := adb.JournalLen() @@ -652,7 +673,7 @@ func TestAccountsDB_RevertDataStepByStepAccountDataShouldWork(t *testing.T) { state2, err := adb.GetAccountWithJournal(adr2) assert.Nil(t, err) - state2.DataTrieTracker().SaveKeyValue([]byte{65, 66, 67}, []byte{32, 33, 34}) + state2.DataTrieTracker().SaveKeyValue(key, val) err = adb.SaveDataTrie(state2) assert.Nil(t, err) snapshotCreated2 := adb.JournalLen() @@ -698,6 +719,9 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test //adr1 puts data inside trie. adr2 puts the same data //revert should work + key := []byte("ABC") + val := []byte("123") + newVal := []byte("124") adr1 := integrationTests.CreateRandomAddress() adr2 := integrationTests.CreateRandomAddress() @@ -711,7 +735,7 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test //Step 2. create 2 new accounts state1, err := adb.GetAccountWithJournal(adr1) assert.Nil(t, err) - state1.DataTrieTracker().SaveKeyValue([]byte{65, 66, 67}, []byte{32, 33, 34}) + state1.DataTrieTracker().SaveKeyValue(key, val) err = adb.SaveDataTrie(state1) assert.Nil(t, err) snapshotCreated1 := adb.JournalLen() @@ -727,14 +751,14 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test state2, err := adb.GetAccountWithJournal(adr2) assert.Nil(t, err) - state2.DataTrieTracker().SaveKeyValue([]byte{65, 66, 67}, []byte{32, 33, 34}) + state2.DataTrieTracker().SaveKeyValue(key, val) err = adb.SaveDataTrie(state2) assert.Nil(t, err) snapshotCreated2 := adb.JournalLen() rootHash, err = adb.RootHash() assert.Nil(t, err) hrCreated2 := base64.StdEncoding.EncodeToString(rootHash) - rootHash, err = state1.DataTrie().Root() + rootHash, err = state2.DataTrie().Root() assert.Nil(t, err) hrRoot2 := base64.StdEncoding.EncodeToString(rootHash) @@ -745,8 +769,8 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test assert.NotEqual(t, snapshotCreated2, snapshotCreated1) assert.NotEqual(t, hrCreated1, hrCreated2) - //Test 2.2 test whether the datatrie roots match - assert.Equal(t, hrRoot1, hrRoot2) + //Test 2.2 test that the datatrie roots are different + assert.NotEqual(t, hrRoot1, hrRoot2) //Step 3. 
Commit rootCommit, err := adb.Commit() @@ -755,7 +779,7 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test //Step 4. 2-nd account changes its data snapshotMod := adb.JournalLen() - state2.DataTrieTracker().SaveKeyValue([]byte{65, 66, 67}, []byte{32, 33, 35}) + state2.DataTrieTracker().SaveKeyValue(key, newVal) err = adb.SaveDataTrie(state2) assert.Nil(t, err) rootHash, err = adb.RootHash() @@ -772,7 +796,6 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test assert.NotEqual(t, hrCreated2p1, hrCreated2) //Test 4.2 test whether the datatrie roots match/mismatch - assert.Equal(t, hrRoot1, hrRoot2) assert.NotEqual(t, hrRoot2, hrRoot2p1) //Step 5. Revert 2-nd account modification @@ -867,7 +890,7 @@ func TestAccountsDB_ExecALotOfBalanceTxOK(t *testing.T) { fmt.Printf("Original root hash: %s\n", hrOriginal) for i := 1; i <= 1000; i++ { - err := integrationTests.AdbEmulateBalanceTxExecution(acntSrc.(*state.Account), acntDest.(*state.Account), big.NewInt(int64(i))) + err = integrationTests.AdbEmulateBalanceTxExecution(acntSrc.(*state.Account), acntDest.(*state.Account), big.NewInt(int64(i))) assert.Nil(t, err) } @@ -901,7 +924,7 @@ func TestAccountsDB_ExecALotOfBalanceTxOKorNOK(t *testing.T) { st := time.Now() for i := 1; i <= 1000; i++ { - err := integrationTests.AdbEmulateBalanceTxExecution(acntSrc.(*state.Account), acntDest.(*state.Account), big.NewInt(int64(i))) + err = integrationTests.AdbEmulateBalanceTxExecution(acntSrc.(*state.Account), acntDest.(*state.Account), big.NewInt(int64(i))) assert.Nil(t, err) err = integrationTests.AdbEmulateBalanceTxExecution(acntDest.(*state.Account), acntSrc.(*state.Account), big.NewInt(int64(1000000))) @@ -997,7 +1020,11 @@ func createAccounts( ) (*state.AccountsDB, []state.AddressContainer, data.Trie) { cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) store, _ := storageUnit.NewStorageUnit(cache, persist) - tr, _ := trie.NewTrie(store, integrationTests.TestMarshalizer, integrationTests.TestHasher) + evictionWaitListSize := uint(100) + + ewl, _ := evictionWaitingList.NewEvictionWaitingList(evictionWaitListSize, memorydb.New(), integrationTests.TestMarshalizer) + trieStorage, _ := trie.NewTrieStorageManager(store, &config.DBConfig{}, ewl) + tr, _ := trie.NewTrie(trieStorage, integrationTests.TestMarshalizer, integrationTests.TestHasher) adb, _ := state.NewAccountsDB(tr, integrationTests.TestHasher, integrationTests.TestMarshalizer, factory.NewAccountCreator()) addr := make([]state.AddressContainer, nrOfAccounts) @@ -1065,3 +1092,466 @@ func BenchmarkTxExecution(b *testing.B) { integrationTests.AdbEmulateBalanceTxSafeExecution(acntSrc.(*state.Account), acntDest.(*state.Account), adb, big.NewInt(1)) } } + +func TestTrieDbPruning_GetAccountAfterPruning(t *testing.T) { + t.Parallel() + + evictionWaitListSize := uint(100) + ewl, _ := evictionWaitingList.NewEvictionWaitingList(evictionWaitListSize, memorydb.New(), integrationTests.TestMarshalizer) + trieStorage, _ := trie.NewTrieStorageManager(memorydb.New(), &config.DBConfig{}, ewl) + tr, _ := trie.NewTrie(trieStorage, integrationTests.TestMarshalizer, integrationTests.TestHasher) + adb, _ := state.NewAccountsDB(tr, integrationTests.TestHasher, integrationTests.TestMarshalizer, factory.NewAccountCreator()) + + address1, _ := integrationTests.TestAddressConverter.CreateAddressFromHex("0000000000000000000000000000000000000000000000000000000000000000") + address2, _ := 
integrationTests.TestAddressConverter.CreateAddressFromHex("0000000000000000000000000000000000000000000000000000000000000001") + address3, _ := integrationTests.TestAddressConverter.CreateAddressFromHex("0000000000000000000000000000000000000000000000000000000000000002") + + newDefaultAccount(adb, address1) + newDefaultAccount(adb, address2) + account := newDefaultAccount(adb, address3) + + rootHash1, _ := adb.Commit() + _ = account.(*state.Account).SetBalanceWithJournal(big.NewInt(1)) + rootHash2, _ := adb.Commit() + _ = tr.Prune(rootHash1, data.OldRoot) + + err := adb.RecreateTrie(rootHash2) + assert.Nil(t, err) + ok, err := adb.HasAccount(address1) + assert.True(t, ok) + assert.Nil(t, err) +} + +func newDefaultAccount(adb *state.AccountsDB, address state.AddressContainer) state.AccountHandler { + account, _ := adb.GetAccountWithJournal(address) + _ = account.(*state.Account).SetNonceWithJournal(0) + _ = account.(*state.Account).SetBalanceWithJournal(big.NewInt(0)) + + return account +} + +func TestTrieDbPruning_GetDataTrieTrackerAfterPruning(t *testing.T) { + t.Parallel() + + evictionWaitListSize := uint(100) + ewl, _ := evictionWaitingList.NewEvictionWaitingList(evictionWaitListSize, memorydb.New(), integrationTests.TestMarshalizer) + trieStorage, _ := trie.NewTrieStorageManager(memorydb.New(), &config.DBConfig{}, ewl) + tr, _ := trie.NewTrie(trieStorage, integrationTests.TestMarshalizer, integrationTests.TestHasher) + adb, _ := state.NewAccountsDB(tr, integrationTests.TestHasher, integrationTests.TestMarshalizer, factory.NewAccountCreator()) + + address1, _ := integrationTests.TestAddressConverter.CreateAddressFromHex("0000000000000000000000000000000000000000000000000000000000000000") + address2, _ := integrationTests.TestAddressConverter.CreateAddressFromHex("0000000000000000000000000000000000000000000000000000000000000001") + + key1 := []byte("ABC") + key2 := []byte("ABD") + value1 := []byte("dog") + value2 := []byte("puppy") + + state1, _ := adb.GetAccountWithJournal(address1) + state1.DataTrieTracker().SaveKeyValue(key1, value1) + state1.DataTrieTracker().SaveKeyValue(key2, value1) + _ = adb.SaveDataTrie(state1) + + state2, _ := adb.GetAccountWithJournal(address2) + state2.DataTrieTracker().SaveKeyValue(key1, value1) + state2.DataTrieTracker().SaveKeyValue(key2, value1) + _ = adb.SaveDataTrie(state2) + + oldRootHash, _ := adb.Commit() + + state2, _ = adb.GetAccountWithJournal(address2) + state2.DataTrieTracker().SaveKeyValue(key1, value2) + _ = adb.SaveDataTrie(state2) + + newRootHash, _ := adb.Commit() + _ = tr.Prune(oldRootHash, data.OldRoot) + + err := adb.RecreateTrie(newRootHash) + assert.Nil(t, err) + ok, err := adb.HasAccount(address1) + assert.True(t, ok) + assert.Nil(t, err) + + collapseTrie(state1, t) + collapseTrie(state2, t) + + val, err := state1.DataTrieTracker().RetrieveValue(key1) + assert.Nil(t, err) + assert.Equal(t, value1, val) + + val, err = state2.DataTrieTracker().RetrieveValue(key2) + assert.Nil(t, err) + assert.Equal(t, value1, val) +} + +func collapseTrie(state state.AccountHandler, t *testing.T) { + stateRootHash := state.GetRootHash() + stateTrie := state.DataTrieTracker().DataTrie() + stateNewTrie, _ := stateTrie.Recreate(stateRootHash) + assert.NotNil(t, stateNewTrie) + + state.DataTrieTracker().SetDataTrie(stateNewTrie) +} + +func TestRollbackBlockAndCheckThatPruningIsCancelled(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numNodesPerShard := 1 + numNodesMeta := 1 + + nodes, advertiser, idxProposers := 
integrationTests.SetupSyncNodesOneShardAndMeta(numNodesPerShard, numNodesMeta) + defer integrationTests.CloseProcessorNodes(nodes, advertiser) + + integrationTests.StartP2pBootstrapOnProcessorNodes(nodes) + integrationTests.StartSyncingBlocks(nodes) + + round := uint64(0) + nonce := uint64(0) + + valMinting := big.NewInt(1000000000) + valToTransferPerTx := big.NewInt(2) + + fmt.Println("Generating private keys for senders and receivers...") + generateCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(1), 0) + nrTxs := 20 + + //sender shard keys, receivers keys + sendersPrivateKeys := make([]crypto.PrivateKey, nrTxs) + receiversPublicKeys := make(map[uint32][]crypto.PublicKey) + for i := 0; i < nrTxs; i++ { + sendersPrivateKeys[i], _, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, 0) + _, pk, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, 0) + receiversPublicKeys[0] = append(receiversPublicKeys[0], pk) + } + + fmt.Println("Minting sender addresses...") + integrationTests.CreateMintingForSenders(nodes, 0, sendersPrivateKeys, valMinting) + + shardNode := nodes[0] + + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + + rootHashOfFirstBlock, _ := shardNode.AccntState.RootHash() + + assert.Equal(t, uint64(1), nodes[0].BlockChain.GetCurrentBlockHeader().GetNonce()) + assert.Equal(t, uint64(1), nodes[1].BlockChain.GetCurrentBlockHeader().GetNonce()) + + fmt.Println("Generating transactions...") + integrationTests.GenerateAndDisseminateTxs(shardNode, sendersPrivateKeys, receiversPublicKeys, valToTransferPerTx, 1000, 1000) + fmt.Println("Delaying for disseminating transactions...") + time.Sleep(time.Second * 5) + + round, _ = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + time.Sleep(time.Second * 5) + + rootHashOfRollbackedBlock, _ := shardNode.AccntState.RootHash() + + assert.Equal(t, uint64(2), nodes[0].BlockChain.GetCurrentBlockHeader().GetNonce()) + assert.Equal(t, uint64(2), nodes[1].BlockChain.GetCurrentBlockHeader().GetNonce()) + + shardIdToRollbackLastBlock := uint32(0) + integrationTests.ForkChoiceOneBlock(nodes, shardIdToRollbackLastBlock) + integrationTests.ResetHighestProbableNonce(nodes, shardIdToRollbackLastBlock, 1) + integrationTests.EmptyDataPools(nodes, shardIdToRollbackLastBlock) + + assert.Equal(t, uint64(1), nodes[0].BlockChain.GetCurrentBlockHeader().GetNonce()) + assert.Equal(t, uint64(2), nodes[1].BlockChain.GetCurrentBlockHeader().GetNonce()) + + nonces := []*uint64{new(uint64), new(uint64)} + atomic.AddUint64(nonces[0], 2) + atomic.AddUint64(nonces[1], 3) + + numOfRounds := 2 + integrationTests.ProposeBlocks( + nodes, + &round, + idxProposers, + nonces, + numOfRounds, + ) + + err := shardNode.AccntState.RecreateTrie(rootHashOfFirstBlock) + assert.Nil(t, err) + assert.Equal(t, uint64(3), nodes[0].BlockChain.GetCurrentBlockHeader().GetNonce()) + assert.Equal(t, uint64(4), nodes[1].BlockChain.GetCurrentBlockHeader().GetNonce()) + + err = shardNode.AccntState.RecreateTrie(rootHashOfRollbackedBlock) + assert.True(t, errors.Is(err, trie.ErrHashNotFound)) +} + +func TestRollbackBlockWithSameRootHashAsPreviousAndCheckThatPruningIsNotDone(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numNodesPerShard := 1 + numNodesMeta := 1 + + nodes, advertiser, idxProposers := integrationTests.SetupSyncNodesOneShardAndMeta(numNodesPerShard, numNodesMeta) + defer 
integrationTests.CloseProcessorNodes(nodes, advertiser)
+
+	integrationTests.StartP2pBootstrapOnProcessorNodes(nodes)
+	integrationTests.StartSyncingBlocks(nodes)
+
+	round := uint64(0)
+	nonce := uint64(0)
+
+	valMinting := big.NewInt(1000000000)
+
+	fmt.Println("Generating private keys for senders and receivers...")
+	generateCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(1), 0)
+	nrTxs := 20
+
+	//sender shard keys, receivers keys
+	sendersPrivateKeys := make([]crypto.PrivateKey, nrTxs)
+	for i := 0; i < nrTxs; i++ {
+		sendersPrivateKeys[i], _, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, 0)
+	}
+
+	fmt.Println("Minting sender addresses...")
+	integrationTests.CreateMintingForSenders(nodes, 0, sendersPrivateKeys, valMinting)
+
+	shardNode := nodes[0]
+
+	round = integrationTests.IncrementAndPrintRound(round)
+	nonce++
+	round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce)
+
+	rootHashOfFirstBlock, _ := shardNode.AccntState.RootHash()
+
+	assert.Equal(t, uint64(1), nodes[0].BlockChain.GetCurrentBlockHeader().GetNonce())
+	assert.Equal(t, uint64(1), nodes[1].BlockChain.GetCurrentBlockHeader().GetNonce())
+
+	_, _ = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce)
+	time.Sleep(time.Second * 5)
+
+	assert.Equal(t, uint64(2), nodes[0].BlockChain.GetCurrentBlockHeader().GetNonce())
+	assert.Equal(t, uint64(2), nodes[1].BlockChain.GetCurrentBlockHeader().GetNonce())
+
+	shardIdToRollbackLastBlock := uint32(0)
+	integrationTests.ForkChoiceOneBlock(nodes, shardIdToRollbackLastBlock)
+	integrationTests.ResetHighestProbableNonce(nodes, shardIdToRollbackLastBlock, 1)
+	integrationTests.EmptyDataPools(nodes, shardIdToRollbackLastBlock)
+
+	assert.Equal(t, uint64(1), nodes[0].BlockChain.GetCurrentBlockHeader().GetNonce())
+	assert.Equal(t, uint64(2), nodes[1].BlockChain.GetCurrentBlockHeader().GetNonce())
+
+	err := shardNode.AccntState.RecreateTrie(rootHashOfFirstBlock)
+	assert.Nil(t, err)
+}
+
+func TestTriePruningWhenBlockIsFinal(t *testing.T) {
+	if testing.Short() {
+		t.Skip("this is not a short test")
+	}
+
+	fmt.Println("Setup nodes...")
+	numOfShards := 1
+	nodesPerShard := 1
+	numMetachainNodes := 1
+
+	senderShard := uint32(0)
+	round := uint64(0)
+	nonce := uint64(0)
+
+	valMinting := big.NewInt(1000000000)
+	valToTransferPerTx := big.NewInt(2)
+
+	nodes, advertiser, idxProposers := integrationTests.SetupSyncNodesOneShardAndMeta(nodesPerShard, numMetachainNodes)
+	integrationTests.DisplayAndStartNodes(nodes)
+
+	defer integrationTests.CloseProcessorNodes(nodes, advertiser)
+
+	fmt.Println("Generating private keys for senders and receivers...")
+	generateCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), 0)
+	nrTxs := 20
+
+	//sender shard keys, receivers keys
+	sendersPrivateKeys := make([]crypto.PrivateKey, nrTxs)
+	receiversPublicKeys := make(map[uint32][]crypto.PublicKey)
+	for i := 0; i < nrTxs; i++ {
+		sendersPrivateKeys[i], _, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard)
+		_, pk, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard)
+		receiversPublicKeys[senderShard] = append(receiversPublicKeys[senderShard], pk)
+	}
+
+	fmt.Println("Minting sender addresses...")
+	integrationTests.CreateMintingForSenders(nodes, senderShard, sendersPrivateKeys, valMinting)
+
+	shardNode := nodes[0]
+
+	round = integrationTests.IncrementAndPrintRound(round)
+	nonce++
+	round, nonce = 
integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + + assert.Equal(t, uint64(1), nodes[0].BlockChain.GetCurrentBlockHeader().GetNonce()) + assert.Equal(t, uint64(1), nodes[1].BlockChain.GetCurrentBlockHeader().GetNonce()) + + rootHashOfFirstBlock, _ := shardNode.AccntState.RootHash() + + fmt.Println("Generating transactions...") + integrationTests.GenerateAndDisseminateTxs(shardNode, sendersPrivateKeys, receiversPublicKeys, valToTransferPerTx, 1000, 1000) + fmt.Println("Delaying for disseminating transactions...") + time.Sleep(time.Second * 5) + + roundsToWait := 6 + for i := 0; i < roundsToWait; i++ { + round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + } + + assert.Equal(t, uint64(7), nodes[0].BlockChain.GetCurrentBlockHeader().GetNonce()) + assert.Equal(t, uint64(7), nodes[1].BlockChain.GetCurrentBlockHeader().GetNonce()) + + err := shardNode.AccntState.RecreateTrie(rootHashOfFirstBlock) + assert.True(t, errors.Is(err, trie.ErrHashNotFound)) +} + +func TestSnapshotOnEpochChange(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfShards := 2 + nodesPerShard := 3 + numMetachainNodes := 3 + stateCheckpointModulus := uint(3) + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + nodes := integrationTests.CreateNodesWithCustomStateCheckpointModulus( + numOfShards, + nodesPerShard, + numMetachainNodes, + integrationTests.GetConnectableAddress(advertiser), + stateCheckpointModulus, + ) + + roundsPerEpoch := uint64(5) + for _, node := range nodes { + node.EpochStartTrigger.SetRoundsPerEpoch(roundsPerEpoch) + } + + idxProposers := make([]int, numOfShards+1) + for i := 0; i < numOfShards; i++ { + idxProposers[i] = i * nodesPerShard + } + idxProposers[numOfShards] = numOfShards * nodesPerShard + + integrationTests.DisplayAndStartNodes(nodes) + + defer func() { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Node.Stop() + } + }() + + sendValue := big.NewInt(5) + receiverAddress := []byte("12345678901234567890123456789012") + initialVal := big.NewInt(10000000) + + integrationTests.MintAllNodes(nodes, initialVal) + + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + time.Sleep(integrationTests.StepDelay) + + checkpointsRootHashes := make(map[int][][]byte) + snapshotsRootHashes := make(map[int][][]byte) + prunedRootHashes := make(map[int][][]byte) + + numShardNodes := numOfShards * nodesPerShard + numRounds := uint32(9) + for i := uint64(0); i < uint64(numRounds); i++ { + + round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + + for _, node := range nodes { + integrationTests.CreateAndSendTransaction(node, sendValue, receiverAddress, "") + } + time.Sleep(integrationTests.StepDelay) + + collectSnapshotAndCheckpointHashes( + nodes, + numShardNodes, + checkpointsRootHashes, + snapshotsRootHashes, + prunedRootHashes, + uint64(stateCheckpointModulus), + ) + } + + numDelayRounds := uint32(4) + for i := uint64(0); i < uint64(numDelayRounds); i++ { + round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) + time.Sleep(integrationTests.StepDelay) + } + + for i := 0; i < numOfShards*nodesPerShard; i++ { + testNodeStateCheckpointSnapshotAndPruning(t, nodes[i], checkpointsRootHashes[i], snapshotsRootHashes[i], prunedRootHashes[i]) + } +} + +func collectSnapshotAndCheckpointHashes( + nodes 
[]*integrationTests.TestProcessorNode, + numShardNodes int, + checkpointsRootHashes map[int][][]byte, + snapshotsRootHashes map[int][][]byte, + prunedRootHashes map[int][][]byte, + stateCheckpointModulus uint64, +) { + for j := 0; j < numShardNodes; j++ { + currentBlockHeader := nodes[j].BlockChain.GetCurrentBlockHeader() + + if currentBlockHeader.IsStartOfEpochBlock() { + snapshotsRootHashes[j] = append(snapshotsRootHashes[j], currentBlockHeader.GetRootHash()) + continue + } + + checkpointRound := currentBlockHeader.GetRound()%uint64(stateCheckpointModulus) == 0 + if checkpointRound { + checkpointsRootHashes[j] = append(checkpointsRootHashes[j], currentBlockHeader.GetRootHash()) + continue + } + + prunedRootHashes[j] = append(prunedRootHashes[j], currentBlockHeader.GetRootHash()) + } +} + +func testNodeStateCheckpointSnapshotAndPruning( + t *testing.T, + node *integrationTests.TestProcessorNode, + checkpointsRootHashes [][]byte, + snapshotsRootHashes [][]byte, + prunedRootHashes [][]byte, +) { + + assert.Equal(t, 3, len(checkpointsRootHashes)) + for i := range checkpointsRootHashes { + tr, err := node.StateTrie.Recreate(checkpointsRootHashes[i]) + assert.Nil(t, err) + assert.NotNil(t, tr) + } + + assert.Equal(t, 1, len(snapshotsRootHashes)) + for i := range snapshotsRootHashes { + tr, err := node.StateTrie.Recreate(snapshotsRootHashes[i]) + assert.Nil(t, err) + assert.NotNil(t, tr) + } + + assert.Equal(t, 5, len(prunedRootHashes)) + for i := range prunedRootHashes { + tr, err := node.StateTrie.Recreate(prunedRootHashes[i]) + assert.Nil(t, tr) + assert.NotNil(t, err) + } +} diff --git a/integrationTests/sync/basicSync_test.go b/integrationTests/sync/basicSync_test.go index 16ca199ad72..9e3d3ef005a 100644 --- a/integrationTests/sync/basicSync_test.go +++ b/integrationTests/sync/basicSync_test.go @@ -61,26 +61,26 @@ func TestSyncWorksInShard_EmptyBlocksNoForks(t *testing.T) { } fmt.Println("Delaying for nodes p2p bootstrap...") - time.Sleep(delayP2pBootstrap) + time.Sleep(integrationTests.P2pBootstrapDelay) round := uint64(0) nonce := uint64(0) round = integrationTests.IncrementAndPrintRound(round) - updateRound(nodes, round) + integrationTests.UpdateRound(nodes, round) nonce++ numRoundsToTest := 5 for i := 0; i < numRoundsToTest; i++ { integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) - time.Sleep(stepSync) + time.Sleep(integrationTests.SyncDelay) round = integrationTests.IncrementAndPrintRound(round) - updateRound(nodes, round) + integrationTests.UpdateRound(nodes, round) nonce++ } - time.Sleep(stepSync) + time.Sleep(integrationTests.SyncDelay) testAllNodesHaveTheSameBlockHeightInBlockchain(t, nodes) } @@ -124,26 +124,26 @@ func TestSyncWorksInShard_EmptyBlocksDoubleSign(t *testing.T) { } fmt.Println("Delaying for nodes p2p bootstrap...") - time.Sleep(delayP2pBootstrap) + time.Sleep(integrationTests.P2pBootstrapDelay) round := uint64(0) nonce := uint64(0) round = integrationTests.IncrementAndPrintRound(round) - updateRound(nodes, round) + integrationTests.UpdateRound(nodes, round) nonce++ numRoundsToTest := 2 for i := 0; i < numRoundsToTest; i++ { integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) - time.Sleep(stepSync) + time.Sleep(integrationTests.SyncDelay) round = integrationTests.IncrementAndPrintRound(round) - updateRound(nodes, round) + integrationTests.UpdateRound(nodes, round) nonce++ } - time.Sleep(stepSync) + time.Sleep(integrationTests.SyncDelay) pubKeysVariant1 := []byte{3} pubKeysVariant2 := []byte{1} @@ -151,12 +151,12 @@ func 
TestSyncWorksInShard_EmptyBlocksDoubleSign(t *testing.T) { proposeBlockWithPubKeyBitmap(nodes[idxProposerShard0], round, nonce, pubKeysVariant1) proposeBlockWithPubKeyBitmap(nodes[1], round, nonce, pubKeysVariant2) - time.Sleep(stepDelay) + time.Sleep(integrationTests.StepDelay) round = integrationTests.IncrementAndPrintRound(round) - updateRound(nodes, round) + integrationTests.UpdateRound(nodes, round) - stepDelayForkResolving := 4 * stepDelay + stepDelayForkResolving := 4 * integrationTests.StepDelay time.Sleep(stepDelayForkResolving) testAllNodesHaveTheSameBlockHeightInBlockchain(t, nodes) diff --git a/integrationTests/sync/common.go b/integrationTests/sync/common.go deleted file mode 100644 index 85a26e86107..00000000000 --- a/integrationTests/sync/common.go +++ /dev/null @@ -1,176 +0,0 @@ -package sync - -import ( - "context" - "fmt" - "sync/atomic" - "time" - - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/integrationTests" - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/sharding" -) - -var stepDelay = time.Second -var delayP2pBootstrap = time.Second * 2 -var stepSync = time.Second * 2 - -func setupSyncNodesOneShardAndMeta( - numNodesPerShard int, - numNodesMeta int, -) ([]*integrationTests.TestProcessorNode, p2p.Messenger, []int) { - - maxShards := uint32(1) - shardId := uint32(0) - - advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") - _ = advertiser.Bootstrap() - advertiserAddr := integrationTests.GetConnectableAddress(advertiser) - - nodes := make([]*integrationTests.TestProcessorNode, 0) - for i := 0; i < numNodesPerShard; i++ { - shardNode := integrationTests.NewTestSyncNode( - maxShards, - shardId, - shardId, - advertiserAddr, - ) - nodes = append(nodes, shardNode) - } - idxProposerShard0 := 0 - - for i := 0; i < numNodesMeta; i++ { - metaNode := integrationTests.NewTestSyncNode( - maxShards, - sharding.MetachainShardId, - shardId, - advertiserAddr, - ) - nodes = append(nodes, metaNode) - } - idxProposerMeta := len(nodes) - 1 - - idxProposers := []int{idxProposerShard0, idxProposerMeta} - - return nodes, advertiser, idxProposers -} - -func startSyncingBlocks(nodes []*integrationTests.TestProcessorNode) { - for _, n := range nodes { - _ = n.StartSync() - } - - fmt.Println("Delaying for nodes to start syncing blocks...") - time.Sleep(stepDelay) -} - -func updateRound(nodes []*integrationTests.TestProcessorNode, round uint64) { - for _, n := range nodes { - n.Rounder.IndexField = int64(round) - } -} - -func proposeAndSyncBlocks( - nodes []*integrationTests.TestProcessorNode, - round *uint64, - idxProposers []int, - nonces []*uint64, - numOfRounds int, -) { - - for i := 0; i < numOfRounds; i++ { - crtRound := atomic.LoadUint64(round) - proposeBlocks(nodes, idxProposers, nonces, crtRound) - - time.Sleep(stepSync) - - crtRound = integrationTests.IncrementAndPrintRound(crtRound) - atomic.StoreUint64(round, crtRound) - updateRound(nodes, crtRound) - incrementNonces(nonces) - } - time.Sleep(stepSync) -} - -func incrementNonces(nonces []*uint64) { - for i := 0; i < len(nonces); i++ { - atomic.AddUint64(nonces[i], 1) - } -} - -func proposeBlocks( - nodes []*integrationTests.TestProcessorNode, - idxProposers []int, - nonces []*uint64, - crtRound uint64, -) { - for idx, proposer := range idxProposers { - crtNonce := atomic.LoadUint64(nonces[idx]) - integrationTests.ProposeBlock(nodes, []int{proposer}, crtRound, crtNonce) - } -} - -func rollBackOneBlock(nodes 
[]*integrationTests.TestProcessorNode, shardId uint32) { - for idx, n := range nodes { - if n.ShardCoordinator.SelfId() != shardId { - continue - } - err := n.Bootstrapper.RollBack(false) - if err != nil { - fmt.Println(err) - } - - newNonce := n.BlockChain.GetCurrentBlockHeader().GetNonce() - fmt.Printf("Node's id %d is at block height %d\n", idx, newNonce) - } -} - -func emptyDataPools(nodes []*integrationTests.TestProcessorNode, shardId uint32) { - for _, n := range nodes { - if n.ShardCoordinator.SelfId() != shardId { - continue - } - - emptyNodeDataPool(n) - } -} - -func emptyNodeDataPool(node *integrationTests.TestProcessorNode) { - if node.ShardDataPool != nil { - emptyShardDataPool(node.ShardDataPool) - } - if node.MetaDataPool != nil { - emptyMetaDataPool(node.MetaDataPool) - } -} - -func emptyShardDataPool(sdp dataRetriever.PoolsHolder) { - sdp.HeadersNonces().Clear() - sdp.Headers().Clear() - sdp.UnsignedTransactions().Clear() - sdp.Transactions().Clear() - sdp.MetaBlocks().Clear() - sdp.MiniBlocks().Clear() - sdp.PeerChangesBlocks().Clear() -} - -func emptyMetaDataPool(holder dataRetriever.MetaPoolsHolder) { - holder.HeadersNonces().Clear() - holder.MetaBlocks().Clear() - holder.MiniBlocks().Clear() - holder.ShardHeaders().Clear() -} - -func resetHighestProbableNonce(nodes []*integrationTests.TestProcessorNode, shardId uint32, targetNonce uint64) { - for _, n := range nodes { - if n.ShardCoordinator.SelfId() != shardId { - continue - } - if n.BlockChain.GetCurrentBlockHeader().GetNonce() != targetNonce { - continue - } - - n.Bootstrapper.SetProbableHighestNonce(targetNonce) - } -} diff --git a/integrationTests/sync/edgeCases_test.go b/integrationTests/sync/edgeCases_test.go index 1cbb3ccc10b..ac729fdc0bd 100644 --- a/integrationTests/sync/edgeCases_test.go +++ b/integrationTests/sync/edgeCases_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) @@ -20,15 +21,17 @@ func TestSyncMetaNodeIsSyncingReceivedHigherRoundBlockFromShard(t *testing.T) { t.Skip("this is not a short test") } + _ = logger.SetLogLevel("*:DEBUG") + numNodesPerShard := 3 numNodesMeta := 3 - nodes, advertiser, idxProposers := setupSyncNodesOneShardAndMeta(numNodesPerShard, numNodesMeta) + nodes, advertiser, idxProposers := integrationTests.SetupSyncNodesOneShardAndMeta(numNodesPerShard, numNodesMeta) idxProposerMeta := idxProposers[1] defer integrationTests.CloseProcessorNodes(nodes, advertiser) integrationTests.StartP2pBootstrapOnProcessorNodes(nodes) - startSyncingBlocks(nodes) + integrationTests.StartSyncingBlocks(nodes) round := uint64(0) idxNonceShard := 0 @@ -36,11 +39,11 @@ func TestSyncMetaNodeIsSyncingReceivedHigherRoundBlockFromShard(t *testing.T) { nonces := []*uint64{new(uint64), new(uint64)} round = integrationTests.IncrementAndPrintRound(round) - updateRound(nodes, round) - incrementNonces(nonces) + integrationTests.UpdateRound(nodes, round) + integrationTests.IncrementNonces(nonces) numRoundsBlocksAreProposedCorrectly := 3 - proposeAndSyncBlocks( + integrationTests.ProposeBlocks( nodes, &round, idxProposers, @@ -49,15 +52,15 @@ func TestSyncMetaNodeIsSyncingReceivedHigherRoundBlockFromShard(t *testing.T) { ) shardIdToRollbackLastBlock := uint32(0) - rollBackOneBlock(nodes, shardIdToRollbackLastBlock) - resetHighestProbableNonce(nodes, shardIdToRollbackLastBlock, 2) - emptyDataPools(nodes, shardIdToRollbackLastBlock) + 
integrationTests.ForkChoiceOneBlock(nodes, shardIdToRollbackLastBlock) + integrationTests.ResetHighestProbableNonce(nodes, shardIdToRollbackLastBlock, 2) + integrationTests.EmptyDataPools(nodes, shardIdToRollbackLastBlock) //revert also the nonce, so the same block nonce will be used when shard will propose the next block atomic.AddUint64(nonces[idxNonceShard], ^uint64(0)) numRoundsBlocksAreProposedOnlyByMeta := 2 - proposeAndSyncBlocks( + integrationTests.ProposeBlocks( nodes, &round, []int{idxProposerMeta}, @@ -66,7 +69,7 @@ func TestSyncMetaNodeIsSyncingReceivedHigherRoundBlockFromShard(t *testing.T) { ) secondNumRoundsBlocksAreProposedCorrectly := 2 - proposeAndSyncBlocks( + integrationTests.ProposeBlocks( nodes, &round, idxProposers, @@ -88,11 +91,11 @@ func TestSyncMetaNodeIsSyncingReceivedHigherRoundBlockFromShard(t *testing.T) { syncNodesSlice := []*integrationTests.TestProcessorNode{syncMetaNode} integrationTests.StartP2pBootstrapOnProcessorNodes(syncNodesSlice) - startSyncingBlocks(syncNodesSlice) + integrationTests.StartSyncingBlocks(syncNodesSlice) //after joining the network we must propose a new block on the metachain as to be received by the sync //node and to start the bootstrapping process - proposeAndSyncBlocks( + integrationTests.ProposeBlocks( nodes, &round, []int{idxProposerMeta}, @@ -103,8 +106,8 @@ func TestSyncMetaNodeIsSyncingReceivedHigherRoundBlockFromShard(t *testing.T) { numOfRoundsToWaitToCatchUp := numRoundsBlocksAreProposedCorrectly + numRoundsBlocksAreProposedOnlyByMeta + secondNumRoundsBlocksAreProposedCorrectly - time.Sleep(stepSync * time.Duration(numOfRoundsToWaitToCatchUp)) - updateRound(nodes, round) + time.Sleep(integrationTests.SyncDelay * time.Duration(numOfRoundsToWaitToCatchUp)) + integrationTests.UpdateRound(nodes, round) nonceProposerMeta := nodes[idxProposerMeta].BlockChain.GetCurrentBlockHeader().GetNonce() nonceSyncNode := syncMetaNode.BlockChain.GetCurrentBlockHeader().GetNonce() diff --git a/integrationTests/testGameHelperFunctions.go b/integrationTests/testGameHelperFunctions.go index cceb1d96848..0eb27df9258 100644 --- a/integrationTests/testGameHelperFunctions.go +++ b/integrationTests/testGameHelperFunctions.go @@ -10,7 +10,6 @@ import ( "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/stretchr/testify/assert" ) @@ -47,13 +46,13 @@ func ScCallTxWithParams( _, _ = senderNode.SendTransaction(txDeploy) fmt.Println("Delaying for disseminating the deploy tx...") - time.Sleep(stepDelay) + time.Sleep(StepDelay) } // DeployScTx creates and sends a SC tx -func DeployScTx(nodes []*TestProcessorNode, senderIdx int, scCode string) { +func DeployScTx(nodes []*TestProcessorNode, senderIdx int, scCode string, vmType []byte) { fmt.Println("Deploying SC...") - data := scCode + "@" + hex.EncodeToString(factory.IELEVirtualMachine) + data := scCode + "@" + hex.EncodeToString(vmType) txDeploy := generateTx( nodes[senderIdx].OwnAccount.SkTxSign, nodes[senderIdx].OwnAccount.SingleSigner, @@ -69,7 +68,7 @@ func DeployScTx(nodes []*TestProcessorNode, senderIdx int, scCode string) { nodes[senderIdx].OwnAccount.Nonce++ _, _ = nodes[senderIdx].SendTransaction(txDeploy) fmt.Println("Delaying for disseminating the deploy tx...") - time.Sleep(stepDelay) + time.Sleep(StepDelay) fmt.Println(MakeDisplayTable(nodes)) } @@ -167,7 +166,7 @@ func NodeDoesWithdraw( nodes[idxNode].OwnAccount.Nonce++ _, _ = 
nodes[idxNode].SendTransaction(txScCall) fmt.Println("Delaying for disseminating SC call tx...") - time.Sleep(stepDelay) + time.Sleep(StepDelay) fmt.Println(MakeDisplayTable(nodes)) } @@ -195,7 +194,7 @@ func NodeDoesTopUp( nodes[idxNode].OwnAccount.Nonce++ _, _ = nodes[idxNode].SendTransaction(txScCall) fmt.Println("Delaying for disseminating SC call tx...") - time.Sleep(stepDelay) + time.Sleep(StepDelay) fmt.Println(MakeDisplayTable(nodes)) } diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 24baa841985..bab17959bcc 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -8,6 +8,7 @@ import ( "encoding/binary" "encoding/hex" "fmt" + "io/ioutil" "math/big" "strings" "sync" @@ -15,7 +16,7 @@ import ( "testing" "time" - "github.com/ElrondNetwork/elrond-go/core/genesis" + "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/crypto/signing" "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber" @@ -27,12 +28,15 @@ import ( "github.com/ElrondNetwork/elrond-go/data/state/factory" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/data/trie" + "github.com/ElrondNetwork/elrond-go/data/trie/evictionWaitingList" "github.com/ElrondNetwork/elrond-go/data/typeConverters" - "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool/headersCache" "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool" "github.com/ElrondNetwork/elrond-go/display" + "github.com/ElrondNetwork/elrond-go/epochStart/genesis" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/hashing/sha256" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" @@ -56,8 +60,14 @@ import ( "github.com/stretchr/testify/assert" ) -var stepDelay = time.Second -var p2pBootstrapStepDelay = 5 * time.Second +// StepDelay is used so that transactions can disseminate properly +var StepDelay = time.Second + +// SyncDelay is used so that nodes have enough time to sync +var SyncDelay = time.Second * 2 + +// P2pBootstrapDelay is used so that nodes have enough time to bootstrap +var P2pBootstrapDelay = 5 * time.Second // GetConnectableAddress returns a non circuit, non windows default connectable address for provided messenger func GetConnectableAddress(mes p2p.Messenger) string { @@ -80,7 +90,7 @@ func CreateMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Mess sk, nil, loadBalancer.NewOutgoingChannelLoadBalancer(), - discovery.NewKadDhtPeerDiscoverer(stepDelay, "test", []string{initialAddr}), + discovery.NewKadDhtPeerDiscoverer(StepDelay, "test", []string{initialAddr}), ) if err != nil { fmt.Println(err.Error()) @@ -92,26 +102,22 @@ func CreateMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Mess // CreateTestShardDataPool creates a test data pool for shard nodes func CreateTestShardDataPool(txPool dataRetriever.ShardedDataCacherNotifier) dataRetriever.PoolsHolder { if txPool == nil { - txPool, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1}) + txPool, _ = txpool.NewShardedTxPool(storageUnit.CacheConfig{Size: 100000, Shards: 1}) } uTxPool, _ := 
shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1}) rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 300, Type: storageUnit.LRUCache, Shards: 1}) - cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache, Shards: 1} - hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} - hdrNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - hdrNonces, _ := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) + hdrPool, _ := headersCache.NewHeadersPool(config.HeadersPoolConfig{MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 100}) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} + cacherCfg := storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} txBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} peerChangeBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} - metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + cacherCfg = storageUnit.CacheConfig{Size: 50000, Type: storageUnit.LRUCache} + trieNodes, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) currTxs, _ := dataPool.NewCurrentBlockPool() @@ -120,10 +126,9 @@ func CreateTestShardDataPool(txPool dataRetriever.ShardedDataCacherNotifier) dat uTxPool, rewardsTxPool, hdrPool, - hdrNonces, txBlockBody, peerChangeBlockBody, - metaBlocks, + trieNodes, currTxs, ) @@ -132,28 +137,23 @@ func CreateTestShardDataPool(txPool dataRetriever.ShardedDataCacherNotifier) dat // CreateTestMetaDataPool creates a test data pool for meta nodes func CreateTestMetaDataPool() dataRetriever.MetaPoolsHolder { - cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} + cacherCfg := storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} txBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} - shardHeaders, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + headers, _ := headersCache.NewHeadersPool(config.HeadersPoolConfig{MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 100}) - shardHeadersNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - shardHeadersNonces, _ := dataPool.NewNonceSyncMapCacher(shardHeadersNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) + cacherCfg = storageUnit.CacheConfig{Size: 50000, Type: storageUnit.LRUCache} + trieNodes, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1}) + txPool, _ := txpool.NewShardedTxPool(storageUnit.CacheConfig{Size: 100000, Shards: 1}) uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 
100000, Type: storageUnit.LRUCache, Shards: 1}) currTxs, _ := dataPool.NewCurrentBlockPool() dPool, _ := dataPool.NewMetaDataPool( - metaBlocks, txBlockBody, - shardHeaders, - shardHeadersNonces, + trieNodes, + headers, txPool, uTxPool, currTxs, @@ -215,10 +215,21 @@ func CreateMetaStore(coordinator sharding.Coordinator) dataRetriever.StorageServ // CreateAccountsDB creates an account state with a valid trie implementation but with a memory storage func CreateAccountsDB(accountType factory.Type) (*state.AccountsDB, data.Trie, storage.Storer) { - hasher := sha256.Sha256{} store := CreateMemUnit() + ewl, _ := evictionWaitingList.NewEvictionWaitingList(100, memorydb.New(), TestMarshalizer) + + // TODO change this implementation with a factory + tempDir, _ := ioutil.TempDir("", "integrationTests") + cfg := &config.DBConfig{ + FilePath: tempDir, + Type: string(storageUnit.LvlDbSerial), + BatchDelaySeconds: 4, + MaxBatchSize: 10000, + MaxOpenFiles: 10, + } + trieStorage, _ := trie.NewTrieStorageManager(store, cfg, ewl) - tr, _ := trie.NewTrie(store, TestMarshalizer, hasher) + tr, _ := trie.NewTrie(trieStorage, TestMarshalizer, TestHasher) accountFactory, _ := factory.NewAccountFactoryCreator(accountType) adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, TestMarshalizer, accountFactory) @@ -317,11 +328,13 @@ func CreateGenesisBlocks( uint64Converter typeConverters.Uint64ByteSliceConverter, metaDataPool dataRetriever.MetaPoolsHolder, economics *economics.EconomicsData, + rootHash []byte, ) map[uint32]data.HeaderHandler { genesisBlocks := make(map[uint32]data.HeaderHandler) for shardId := uint32(0); shardId < shardCoordinator.NumberOfShards(); shardId++ { - genesisBlocks[shardId] = CreateSimpleGenesisBlock(shardId) + genesisBlock := CreateSimpleGenesisBlock(shardId) + genesisBlocks[shardId] = genesisBlock } genesisBlocks[sharding.MetachainShardId] = CreateGenesisMetaBlock( @@ -336,6 +349,7 @@ func CreateGenesisBlocks( uint64Converter, metaDataPool, economics, + rootHash, ) return genesisBlocks @@ -354,6 +368,7 @@ func CreateGenesisMetaBlock( uint64Converter typeConverters.Uint64ByteSliceConverter, metaDataPool dataRetriever.MetaPoolsHolder, economics *economics.EconomicsData, + rootHash []byte, ) data.HeaderHandler { argsMetaGenesis := genesis.ArgsMetaGenesisBlockCreator{ GenesisTime: 0, @@ -368,7 +383,7 @@ func CreateGenesisMetaBlock( Uint64ByteSliceConverter: uint64Converter, MetaDatapool: metaDataPool, Economics: economics, - ValidatorStatsRootHash: []byte("validator stats root hash"), + ValidatorStatsRootHash: rootHash, } if shardCoordinator.SelfId() != sharding.MetachainShardId { @@ -393,6 +408,7 @@ func CreateGenesisMetaBlock( metaHdr, _ := genesis.CreateMetaGenesisBlock(argsMetaGenesis) fmt.Printf("meta genesis root hash %s \n", hex.EncodeToString(metaHdr.GetRootHash())) + fmt.Printf("meta genesis validatorStatistics %d %s \n", shardCoordinator.SelfId(), hex.EncodeToString(metaHdr.GetValidatorStatsRootHash())) return metaHdr } @@ -448,7 +464,6 @@ func MakeDisplayTable(nodes []*TestProcessorNode) string { ) } table, _ := display.CreateTableString(header, dataLines) - return table } @@ -556,6 +571,8 @@ func CreateSimpleTxProcessor(accnts state.AccountsAdapter) process.TransactionPr return fee }, }, + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) return txProcessor @@ -563,7 +580,9 @@ func CreateSimpleTxProcessor(accnts state.AccountsAdapter) process.TransactionPr // CreateNewDefaultTrie returns a new trie with test hasher and marsahalizer func 
CreateNewDefaultTrie() data.Trie { - tr, _ := trie.NewTrie(CreateMemUnit(), TestMarshalizer, TestHasher) + ewl, _ := evictionWaitingList.NewEvictionWaitingList(100, memorydb.New(), TestMarshalizer) + trieStorage, _ := trie.NewTrieStorageManager(CreateMemUnit(), &config.DBConfig{}, ewl) + tr, _ := trie.NewTrie(trieStorage, TestMarshalizer, TestHasher) return tr } @@ -631,10 +650,6 @@ func ProposeBlock(nodes []*TestProcessorNode, idxProposers []int, round uint64, fmt.Println("All shards propose blocks...") for idx, n := range nodes { - // set the consensus reward addresses as rewards processor expects at least valid round - // otherwise the produced rewards will not be valid on verification - n.BlockProcessor.SetConsensusData([]byte("randomness"), round, 0, n.ShardCoordinator.SelfId()) - if !IsIntInSlice(idx, idxProposers) { continue } @@ -645,7 +660,7 @@ func ProposeBlock(nodes []*TestProcessorNode, idxProposers []int, round uint64, } fmt.Println("Delaying for disseminating headers and miniblocks...") - time.Sleep(stepDelay) + time.Sleep(StepDelay) fmt.Println(MakeDisplayTable(nodes)) } @@ -670,7 +685,7 @@ func SyncBlock( } } - time.Sleep(stepDelay) + time.Sleep(StepDelay) fmt.Println(MakeDisplayTable(nodes)) } @@ -783,7 +798,7 @@ func extractUint64ValueFromTxHandler(txHandler data.TransactionHandler) uint64 { return tx.Nonce } - buff, _ := hex.DecodeString(txHandler.GetData()) + buff := txHandler.GetData() return binary.BigEndian.Uint64(buff) } @@ -794,7 +809,6 @@ func CreateNodes( numMetaChainNodes int, serviceID string, ) []*TestProcessorNode { - //first node generated will have is pk belonging to firstSkShardId nodes := make([]*TestProcessorNode, numOfShards*nodesPerShard+numMetaChainNodes) idx := 0 @@ -816,6 +830,35 @@ func CreateNodes( return nodes } +// CreateNodesWithCustomStateCheckpointModulus creates multiple nodes in different shards with custom stateCheckpointModulus +func CreateNodesWithCustomStateCheckpointModulus( + numOfShards int, + nodesPerShard int, + numMetaChainNodes int, + serviceID string, + stateCheckpointModulus uint, +) []*TestProcessorNode { + nodes := make([]*TestProcessorNode, numOfShards*nodesPerShard+numMetaChainNodes) + + idx := 0 + for shardId := uint32(0); shardId < uint32(numOfShards); shardId++ { + for j := 0; j < nodesPerShard; j++ { + n := NewTestProcessorNodeWithStateCheckpointModulus(uint32(numOfShards), shardId, shardId, serviceID, stateCheckpointModulus) + + nodes[idx] = n + idx++ + } + } + + for i := 0; i < numMetaChainNodes; i++ { + metaNode := NewTestProcessorNodeWithStateCheckpointModulus(uint32(numOfShards), sharding.MetachainShardId, 0, serviceID, stateCheckpointModulus) + idx = i + numOfShards*nodesPerShard + nodes[idx] = metaNode + } + + return nodes +} + // DisplayAndStartNodes prints each nodes shard ID, sk and pk, and then starts the node func DisplayAndStartNodes(nodes []*TestProcessorNode) { for _, n := range nodes { @@ -832,7 +875,7 @@ func DisplayAndStartNodes(nodes []*TestProcessorNode) { } fmt.Println("Delaying for node bootstrap and topic announcement...") - time.Sleep(p2pBootstrapStepDelay) + time.Sleep(P2pBootstrapDelay) } // SetEconomicsParameters will set maxGasLimitPerBlock, minGasPrice and minGasLimits to provided nodes @@ -906,7 +949,7 @@ func CreateAndSendTransaction( Value: txValue, SndAddr: node.OwnAccount.Address.Bytes(), RcvAddr: rcvAddress, - Data: txData, + Data: []byte(txData), GasPrice: MinTxGasPrice, GasLimit: MinTxGasLimit*100 + uint64(len(txData)), } @@ -918,6 +961,30 @@ func CreateAndSendTransaction( 
node.OwnAccount.Nonce++ } +func CreateAndSendTransactionWithGasLimit( + node *TestProcessorNode, + txValue *big.Int, + gasLimit uint64, + rcvAddress []byte, + txData []byte, +) { + tx := &transaction.Transaction{ + Nonce: node.OwnAccount.Nonce, + Value: txValue, + SndAddr: node.OwnAccount.Address.Bytes(), + RcvAddr: rcvAddress, + Data: txData, + GasPrice: MinTxGasPrice, + GasLimit: gasLimit, + } + + txBuff, _ := TestMarshalizer.Marshal(tx) + tx.Signature, _ = node.OwnAccount.SingleSigner.Sign(node.OwnAccount.SkTxSign, txBuff) + + _, _ = node.SendTransaction(tx) + node.OwnAccount.Nonce++ +} + type txArgs struct { nonce uint64 value *big.Int @@ -943,7 +1010,7 @@ func generateTransferTx( Value: valToTransfer, RcvAddr: receiverPubKeyBytes, SndAddr: skToPk(senderPrivateKey), - Data: "", + Data: []byte(""), GasLimit: gasLimit, GasPrice: gasPrice, } @@ -966,7 +1033,7 @@ func generateTx( SndAddr: args.sndAddr, GasPrice: args.gasPrice, GasLimit: args.gasLimit, - Data: args.data, + Data: []byte(args.data), } txBuff, _ := TestMarshalizer.Marshal(tx) tx.Signature, _ = signer.Sign(skSign, txBuff) @@ -1139,7 +1206,7 @@ func ProposeBlockSignalsEmptyBlock( isEmptyBlock := len(txHashes) == 0 fmt.Println("Delaying for disseminating headers and miniblocks...") - time.Sleep(stepDelay) + time.Sleep(StepDelay) return header, body, isEmptyBlock } @@ -1237,7 +1304,7 @@ func CreateResolversDataPool( txHashes := make([][]byte, maxTxs) txsSndAddr := make([][]byte, 0) - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) + txPool, _ := txpool.NewShardedTxPool(storageUnit.CacheConfig{Size: 100, Shards: 1}) for i := 0; i < maxTxs; i++ { tx, txHash := generateValidTx(t, shardCoordinator, senderShardID, recvShardId) @@ -1269,7 +1336,7 @@ func generateValidTx( _, _ = accnts.Commit() mockNode, _ := node.NewNode( - node.WithMarshalizer(TestMarshalizer), + node.WithMarshalizer(TestMarshalizer, 100), node.WithHasher(TestHasher), node.WithAddressConverter(TestAddressConverter), node.WithKeyGen(signing.NewKeyGenerator(kyber.NewBlakeSHA256Ed25519())), @@ -1434,8 +1501,8 @@ func CreateCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards uint32) *Cr keyGen := signing.NewKeyGenerator(suite) keysMap := make(map[uint32][]*TestKeyPair) - keyPairs := make([]*TestKeyPair, nodesPerShard) for shardId := uint32(0); shardId < nbShards; shardId++ { + keyPairs := make([]*TestKeyPair, nodesPerShard) for n := 0; n < nodesPerShard; n++ { kp := &TestKeyPair{} kp.Sk, kp.Pk = keyGen.GeneratePair() @@ -1444,7 +1511,7 @@ func CreateCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards uint32) *Cr keysMap[shardId] = keyPairs } - keyPairs = make([]*TestKeyPair, nbMetaNodes) + keyPairs := make([]*TestKeyPair, nbMetaNodes) for n := 0; n < nbMetaNodes; n++ { kp := &TestKeyPair{} kp.Sk, kp.Pk = keyGen.GeneratePair() @@ -1476,5 +1543,168 @@ func StartP2pBootstrapOnProcessorNodes(nodes []*TestProcessorNode) { } fmt.Println("Delaying for nodes p2p bootstrap...") - time.Sleep(p2pBootstrapStepDelay) + time.Sleep(P2pBootstrapDelay) +} + +// SetupSyncNodesOneShardAndMeta creates nodes with sync capabilities divided into one shard and a metachain +func SetupSyncNodesOneShardAndMeta( + numNodesPerShard int, + numNodesMeta int, +) ([]*TestProcessorNode, p2p.Messenger, []int) { + + maxShards := uint32(1) + shardId := uint32(0) + + advertiser := CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + advertiserAddr := GetConnectableAddress(advertiser) + + nodes := 
make([]*TestProcessorNode, 0) + for i := 0; i < numNodesPerShard; i++ { + shardNode := NewTestSyncNode( + maxShards, + shardId, + shardId, + advertiserAddr, + ) + nodes = append(nodes, shardNode) + } + idxProposerShard0 := 0 + + for i := 0; i < numNodesMeta; i++ { + metaNode := NewTestSyncNode( + maxShards, + sharding.MetachainShardId, + shardId, + advertiserAddr, + ) + nodes = append(nodes, metaNode) + } + idxProposerMeta := len(nodes) - 1 + + idxProposers := []int{idxProposerShard0, idxProposerMeta} + + return nodes, advertiser, idxProposers +} + +// StartSyncingBlocks starts the syncing process of all the nodes +func StartSyncingBlocks(nodes []*TestProcessorNode) { + for _, n := range nodes { + _ = n.StartSync() + } + + fmt.Println("Delaying for nodes to start syncing blocks...") + time.Sleep(StepDelay) +} + +// ForkChoiceOneBlock rollbacks a block from the given shard +func ForkChoiceOneBlock(nodes []*TestProcessorNode, shardId uint32) { + for idx, n := range nodes { + if n.ShardCoordinator.SelfId() != shardId { + continue + } + err := n.Bootstrapper.RollBack(false) + if err != nil { + fmt.Println(err) + } + + newNonce := n.BlockChain.GetCurrentBlockHeader().GetNonce() + fmt.Printf("Node's id %d is at block height %d\n", idx, newNonce) + } +} + +// ResetHighestProbableNonce resets the highest probable nonce +func ResetHighestProbableNonce(nodes []*TestProcessorNode, shardId uint32, targetNonce uint64) { + for _, n := range nodes { + if n.ShardCoordinator.SelfId() != shardId { + continue + } + if n.BlockChain.GetCurrentBlockHeader().GetNonce() != targetNonce { + continue + } + + n.Bootstrapper.SetProbableHighestNonce(targetNonce) + } +} + +// EmptyDataPools clears all the data pools +func EmptyDataPools(nodes []*TestProcessorNode, shardId uint32) { + for _, n := range nodes { + if n.ShardCoordinator.SelfId() != shardId { + continue + } + + emptyNodeDataPool(n) + } +} + +func emptyNodeDataPool(node *TestProcessorNode) { + if node.ShardDataPool != nil { + emptyShardDataPool(node.ShardDataPool) + } + if node.MetaDataPool != nil { + emptyMetaDataPool(node.MetaDataPool) + } +} + +func emptyShardDataPool(sdp dataRetriever.PoolsHolder) { + sdp.Headers().Clear() + sdp.UnsignedTransactions().Clear() + sdp.Transactions().Clear() + sdp.MiniBlocks().Clear() + sdp.PeerChangesBlocks().Clear() +} + +func emptyMetaDataPool(holder dataRetriever.MetaPoolsHolder) { + holder.Headers().Clear() + holder.MiniBlocks().Clear() +} + +// UpdateRound updates the round for every node +func UpdateRound(nodes []*TestProcessorNode, round uint64) { + for _, n := range nodes { + n.Rounder.IndexField = int64(round) + } +} + +// ProposeBlocks proposes blocks for a given number of rounds +func ProposeBlocks( + nodes []*TestProcessorNode, + round *uint64, + idxProposers []int, + nonces []*uint64, + numOfRounds int, +) { + + for i := 0; i < numOfRounds; i++ { + crtRound := atomic.LoadUint64(round) + proposeBlocks(nodes, idxProposers, nonces, crtRound) + + time.Sleep(SyncDelay) + + crtRound = IncrementAndPrintRound(crtRound) + atomic.StoreUint64(round, crtRound) + UpdateRound(nodes, crtRound) + IncrementNonces(nonces) + } + time.Sleep(SyncDelay) +} + +// IncrementNonces increments all the nonces +func IncrementNonces(nonces []*uint64) { + for i := 0; i < len(nonces); i++ { + atomic.AddUint64(nonces[i], 1) + } +} + +func proposeBlocks( + nodes []*TestProcessorNode, + idxProposers []int, + nonces []*uint64, + crtRound uint64, +) { + for idx, proposer := range idxProposers { + crtNonce := atomic.LoadUint64(nonces[idx]) + 
ProposeBlock(nodes, []int{proposer}, crtRound, crtNonce) + } } diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 72f89dc8267..e212be72f1c 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -1,10 +1,12 @@ package integrationTests import ( + "bytes" "context" "encoding/hex" "fmt" "math/big" + "sort" "strconv" "sync/atomic" "time" @@ -30,6 +32,8 @@ import ( metafactoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/metachain" factoryDataRetriever "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/shard" "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" + "github.com/ElrondNetwork/elrond-go/epochStart/metachain" + "github.com/ElrondNetwork/elrond-go/epochStart/shardchain" "github.com/ElrondNetwork/elrond-go/hashing/sha256" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" "github.com/ElrondNetwork/elrond-go/marshal" @@ -46,10 +50,13 @@ import ( procFactory "github.com/ElrondNetwork/elrond-go/process/factory" metaProcess "github.com/ElrondNetwork/elrond-go/process/factory/metachain" "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/peer" + "github.com/ElrondNetwork/elrond-go/process/rating" "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" scToProtocol2 "github.com/ElrondNetwork/elrond-go/process/scToProtocol" "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" + "github.com/ElrondNetwork/elrond-go/process/track" "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage/timecache" @@ -67,6 +74,9 @@ var TestMarshalizer = &marshal.JsonMarshalizer{} // TestAddressConverter represents a plain address converter var TestAddressConverter, _ = addressConverters.NewPlainAddressConverter(32, "0x") +// TestAddressConverterBLS represents an address converter from BLS public keys +var TestAddressConverterBLS, _ = addressConverters.NewPlainAddressConverter(128, "0x") + // TestMultiSig represents a mock multisig var TestMultiSig = mock.NewMultiSigner(1) @@ -77,7 +87,6 @@ var TestKeyGenForAccounts = signing.NewKeyGenerator(kyber.NewBlakeSHA256Ed25519( var TestUint64Converter = uint64ByteSlice.NewBigEndianConverter() // MinTxGasPrice defines minimum gas price required by a transaction -//TODO refactor all tests to pass with a non zero value var MinTxGasPrice = uint64(10) // MinTxGasLimit defines minimum gas limit required by a transaction @@ -102,6 +111,11 @@ const roundDuration = 5 * time.Second // IntegrationTestsChainID is the chain ID identifier used in integration tests, processing nodes var IntegrationTestsChainID = []byte("integration tests chain ID") +// sizeCheckDelta the maximum allowed bufer overhead (p2p unmarshalling) +const sizeCheckDelta = 100 + +const stateCheckpointModulus = 100 + // TestKeyPair holds a pair of private/public Keys type TestKeyPair struct { Sk crypto.PrivateKey @@ -131,12 +145,14 @@ type TestProcessorNode struct { Storage dataRetriever.StorageService PeerState state.AccountsAdapter AccntState state.AccountsAdapter + StateTrie data.Trie BlockChain data.ChainHandler GenesisBlocks map[uint32]data.HeaderHandler EconomicsData *economics.TestEconomicsData BlackListHandler process.BlackListHandler + BlockTracker process.BlockTracker InterceptorsContainer process.InterceptorsContainer ResolversContainer 
dataRetriever.ResolversContainer ResolverFinder dataRetriever.ResolversFinder @@ -164,9 +180,13 @@ type TestProcessorNode struct { StorageBootstrapper *mock.StorageBootstrapperMock RequestedItemsHandler dataRetriever.RequestedItemsHandler + EpochStartTrigger TestEpochStartTrigger + MultiSigner crypto.MultiSigner HeaderSigVerifier process.InterceptedHeaderSigVerifier + ValidatorStatisticsProcessor process.ValidatorStatisticsProcessor + //Node is used to call the functionality already implemented in it Node *node.Node SCQueryService external.SCQueryService @@ -179,7 +199,7 @@ type TestProcessorNode struct { ChainID []byte } -// NewTestProcessorNode returns a new TestProcessorNode instance +// NewTestProcessorNode returns a new TestProcessorNode instance with a libp2p messenger func NewTestProcessorNode( maxShards uint32, nodeShardId uint32, @@ -192,13 +212,11 @@ func NewTestProcessorNode( kg := &mock.KeyGenMock{} sk, pk := kg.GeneratePair() - pkAddr := []byte("aaa00000000000000000000000000000") + pkBytes := make([]byte, 128) + address := make([]byte, 32) nodesCoordinator := &mock.NodesCoordinatorMock{ ComputeValidatorsGroupCalled: func(randomness []byte, round uint64, shardId uint32) (validators []sharding.Validator, err error) { - - address := pkAddr - v, _ := sharding.NewValidator(big.NewInt(0), 1, pkAddr, address) - + v, _ := sharding.NewValidator(big.NewInt(0), 1, pkBytes, address) return []sharding.Validator{v}, nil }, } @@ -267,8 +285,9 @@ func (tpn *TestProcessorNode) initTestNode() { tpn.ShardCoordinator, tpn.NodesCoordinator, ) + tpn.initRounder() tpn.initStorage() - tpn.AccntState, _, _ = CreateAccountsDB(factory2.UserAccount) + tpn.AccntState, tpn.StateTrie, _ = CreateAccountsDB(factory2.UserAccount) tpn.PeerState, _, _ = CreateAccountsDB(factory2.ValidatorAccount) tpn.initChainHandler() tpn.initEconomicsData() @@ -277,6 +296,8 @@ func (tpn *TestProcessorNode) initTestNode() { tpn.initResolvers() tpn.initInnerProcessors() tpn.SCQueryService, _ = smartContract.NewSCQueryService(tpn.VMContainer, tpn.EconomicsData.MaxGasLimitPerBlock()) + tpn.initValidatorStatistics() + rootHash, _ := tpn.ValidatorStatisticsProcessor.RootHash() tpn.GenesisBlocks = CreateGenesisBlocks( tpn.AccntState, TestAddressConverter, @@ -289,8 +310,9 @@ func (tpn *TestProcessorNode) initTestNode() { TestUint64Converter, tpn.MetaDataPool, tpn.EconomicsData.EconomicsData, + rootHash, ) - tpn.initBlockProcessor() + tpn.initBlockProcessor(stateCheckpointModulus) tpn.BroadcastMessenger, _ = sposFactory.GetBroadcastMessenger( TestMarshalizer, tpn.Messenger, @@ -347,14 +369,25 @@ func (tpn *TestProcessorNode) initEconomicsData() { BurnPercentage: 0.40, }, FeeSettings: config.FeeSettings{ - MaxGasLimitPerBlock: maxGasLimitPerBlock, - MinGasPrice: minGasPrice, - MinGasLimit: minGasLimit, + MaxGasLimitPerBlock: maxGasLimitPerBlock, + MinGasPrice: minGasPrice, + MinGasLimit: minGasLimit, + GasPerDataByte: "1", + DataLimitForBaseCalc: "10000", }, ValidatorSettings: config.ValidatorSettings{ StakeValue: "500", UnBoundPeriod: "5", }, + RatingSettings: config.RatingSettings{ + StartRating: 500000, + MaxRating: 1000000, + MinRating: 1, + ProposerDecreaseRatingStep: 3858, + ProposerIncreaseRatingStep: 1929, + ValidatorDecreaseRatingStep: 61, + ValidatorIncreaseRatingStep: 31, + }, }, ) @@ -388,6 +421,7 @@ func (tpn *TestProcessorNode) initInterceptors() { tpn.BlackListHandler, tpn.HeaderSigVerifier, tpn.ChainID, + sizeCheckDelta, ) tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() @@ -415,6 +449,7 @@ func 
(tpn *TestProcessorNode) initInterceptors() { tpn.BlackListHandler, tpn.HeaderSigVerifier, tpn.ChainID, + sizeCheckDelta, ) tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() @@ -436,6 +471,8 @@ func (tpn *TestProcessorNode) initResolvers() { tpn.MetaDataPool, TestUint64Converter, dataPacker, + tpn.StateTrie, + 100, ) tpn.ResolversContainer, _ = resolversContainerFactory.Create() @@ -443,11 +480,6 @@ func (tpn *TestProcessorNode) initResolvers() { tpn.RequestHandler, _ = requestHandlers.NewMetaResolverRequestHandler( tpn.ResolverFinder, tpn.RequestedItemsHandler, - factory.ShardHeadersForMetachainTopic, - factory.MetachainBlocksTopic, - factory.TransactionTopic, - factory.UnsignedTransactionTopic, - factory.MiniBlocksTopic, 100, ) } else { @@ -459,6 +491,8 @@ func (tpn *TestProcessorNode) initResolvers() { tpn.ShardDataPool, TestUint64Converter, dataPacker, + tpn.StateTrie, + 100, ) tpn.ResolversContainer, _ = resolversContainerFactory.Create() @@ -466,13 +500,8 @@ func (tpn *TestProcessorNode) initResolvers() { tpn.RequestHandler, _ = requestHandlers.NewShardResolverRequestHandler( tpn.ResolverFinder, tpn.RequestedItemsHandler, - factory.TransactionTopic, - factory.UnsignedTransactionTopic, - factory.RewardsTransactionTopic, - factory.MiniBlocksTopic, - factory.HeadersTopic, - factory.MetachainBlocksTopic, 100, + tpn.ShardCoordinator.SelfId(), ) } } @@ -492,6 +521,10 @@ func (tpn *TestProcessorNode) initInnerProcessors() { return } + if tpn.ValidatorStatisticsProcessor == nil { + tpn.ValidatorStatisticsProcessor = &mock.ValidatorStatisticsProcessorMock{} + } + interimProcFactory, _ := shard.NewIntermediateProcessorsContainerFactory( tpn.ShardCoordinator, TestMarshalizer, @@ -556,6 +589,8 @@ func (tpn *TestProcessorNode) initInnerProcessors() { tpn.GasHandler, ) + receiptsHandler, _ := tpn.InterimProcContainer.Get(dataBlock.ReceiptBlock) + badBlocskHandler, _ := tpn.InterimProcContainer.Get(dataBlock.InvalidBlock) tpn.TxProcessor, _ = transaction.NewTxProcessor( tpn.AccntState, TestHasher, @@ -566,6 +601,8 @@ func (tpn *TestProcessorNode) initInnerProcessors() { rewardsHandler, txTypeHandler, tpn.EconomicsData, + receiptsHandler, + badBlocskHandler, ) tpn.MiniBlocksCompacter, _ = preprocess.NewMiniBlocksCompaction(tpn.EconomicsData, tpn.ShardCoordinator, tpn.GasHandler) @@ -591,6 +628,8 @@ func (tpn *TestProcessorNode) initInnerProcessors() { tpn.PreProcessorsContainer, _ = fact.Create() tpn.TxCoordinator, _ = coordinator.NewTransactionCoordinator( + TestHasher, + TestMarshalizer, tpn.ShardCoordinator, tpn.AccntState, tpn.ShardDataPool.MiniBlocks(), @@ -677,6 +716,8 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors() { tpn.PreProcessorsContainer, _ = fact.Create() tpn.TxCoordinator, _ = coordinator.NewTransactionCoordinator( + TestHasher, + TestMarshalizer, tpn.ShardCoordinator, tpn.AccntState, tpn.MetaDataPool.MiniBlocks(), @@ -687,6 +728,48 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors() { ) } +func (tpn *TestProcessorNode) initValidatorStatistics() { + var peerDataPool peer.DataPool = tpn.MetaDataPool + if tpn.ShardCoordinator.SelfId() < tpn.ShardCoordinator.NumberOfShards() { + peerDataPool = tpn.ShardDataPool + } + + initialNodes := make([]*sharding.InitialNode, 0) + nodesMap := tpn.NodesCoordinator.GetAllValidatorsPublicKeys() + for _, pks := range nodesMap { + for _, pk := range pks { + validator, _, _ := tpn.NodesCoordinator.GetValidatorWithPublicKey(pk) + n := &sharding.InitialNode{ + PubKey: core.ToHex(validator.PubKey()), + Address: 
core.ToHex(validator.Address()), + NodeInfo: sharding.NodeInfo{}, + } + initialNodes = append(initialNodes, n) + } + } + + sort.Slice(initialNodes, func(i, j int) bool { + return bytes.Compare([]byte(initialNodes[i].PubKey), []byte(initialNodes[j].PubKey)) > 0 + }) + + rater, _ := rating.NewBlockSigningRater(tpn.EconomicsData.RatingsData()) + + arguments := peer.ArgValidatorStatisticsProcessor{ + InitialNodes: initialNodes, + PeerAdapter: tpn.PeerState, + AdrConv: TestAddressConverterBLS, + NodesCoordinator: tpn.NodesCoordinator, + ShardCoordinator: tpn.ShardCoordinator, + DataPool: peerDataPool, + StorageService: tpn.Storage, + Marshalizer: TestMarshalizer, + StakeValue: big.NewInt(500), + Rater: rater, + } + + tpn.ValidatorStatisticsProcessor, _ = peer.NewValidatorStatisticsProcessor(arguments) +} + func (tpn *TestProcessorNode) addMockVm(blockchainHook vmcommon.BlockchainHook) { mockVM, _ := mock.NewOneSCExecutorMockVM(blockchainHook, TestHasher) mockVM.GasForOperation = OpGasValueForMockVm @@ -694,11 +777,11 @@ func (tpn *TestProcessorNode) addMockVm(blockchainHook vmcommon.BlockchainHook) _ = tpn.VMContainer.Add(factory.InternalTestingVM, mockVM) } -func (tpn *TestProcessorNode) initBlockProcessor() { +func (tpn *TestProcessorNode) initBlockProcessor(stateCheckpointModulus uint) { var err error tpn.ForkDetector = &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error { + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { return nil }, GetHighestFinalBlockNonceCalled: func() uint64 { @@ -707,8 +790,19 @@ func (tpn *TestProcessorNode) initBlockProcessor() { ProbableHighestNonceCalled: func() uint64 { return 0 }, + GetHighestFinalBlockHashCalled: func() []byte { + return nil + }, } + argsHeaderValidator := block.ArgsHeaderValidator{ + Hasher: TestHasher, + Marshalizer: TestMarshalizer, + } + headerValidator, _ := block.NewHeaderValidator(argsHeaderValidator) + + tpn.initBlockTracker(headerValidator) + argumentsBase := block.ArgBaseProcessor{ Accounts: tpn.AccntState, ForkDetector: tpn.ForkDetector, @@ -719,20 +813,38 @@ func (tpn *TestProcessorNode) initBlockProcessor() { NodesCoordinator: tpn.NodesCoordinator, SpecialAddressHandler: tpn.SpecialAddressHandler, Uint64Converter: TestUint64Converter, - StartHeaders: tpn.GenesisBlocks, RequestHandler: tpn.RequestHandler, Core: nil, BlockChainHook: tpn.BlockchainHook, - ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorMock{}, + ValidatorStatisticsProcessor: tpn.ValidatorStatisticsProcessor, + HeaderValidator: headerValidator, Rounder: &mock.RounderMock{}, BootStorer: &mock.BoostrapStorerMock{ PutCalled: func(round int64, bootData bootstrapStorage.BootstrapData) error { return nil }, }, + BlockTracker: tpn.BlockTracker, } if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { + + argsEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ + GenesisTime: argumentsBase.Rounder.TimeStamp(), + Settings: &config.EpochStartConfig{ + MinRoundsBetweenEpochs: 1000, + RoundsPerEpoch: 10000, + }, + Epoch: 0, + EpochStartNotifier: &mock.EpochStartNotifierStub{}, + Storage: tpn.Storage, + Marshalizer: TestMarshalizer, + } + epochStartTrigger, _ := metachain.NewEpochStartTrigger(argsEpochStart) + tpn.EpochStartTrigger = &metachain.TestTrigger{} + 
tpn.EpochStartTrigger.SetTrigger(epochStartTrigger) + + argumentsBase.EpochStartTrigger = tpn.EpochStartTrigger argumentsBase.TxCoordinator = tpn.TxCoordinator blsKeyedAddressConverter, _ := addressConverters.NewPlainAddressConverter( @@ -756,16 +868,37 @@ func (tpn *TestProcessorNode) initBlockProcessor() { SCDataGetter: tpn.SCQueryService, SCToProtocol: scToProtocol, PeerChangesHandler: scToProtocol, + PendingMiniBlocks: &mock.PendingMiniBlocksHandlerStub{}, } tpn.BlockProcessor, err = block.NewMetaProcessor(arguments) + } else { + argsShardEpochStart := &shardchain.ArgsShardEpochStartTrigger{ + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + HeaderValidator: headerValidator, + Uint64Converter: TestUint64Converter, + DataPool: tpn.ShardDataPool, + Storage: tpn.Storage, + RequestHandler: tpn.RequestHandler, + Epoch: 0, + Validity: 1, + Finality: 1, + EpochStartNotifier: &mock.EpochStartNotifierStub{}, + } + epochStartTrigger, _ := shardchain.NewEpochStartTrigger(argsShardEpochStart) + tpn.EpochStartTrigger = &shardchain.TestTrigger{} + tpn.EpochStartTrigger.SetTrigger(epochStartTrigger) + + argumentsBase.EpochStartTrigger = tpn.EpochStartTrigger argumentsBase.BlockChainHook = tpn.BlockchainHook argumentsBase.TxCoordinator = tpn.TxCoordinator arguments := block.ArgShardProcessor{ - ArgBaseProcessor: argumentsBase, - DataPool: tpn.ShardDataPool, - TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, + ArgBaseProcessor: argumentsBase, + DataPool: tpn.ShardDataPool, + TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, + StateCheckpointModulus: stateCheckpointModulus, } tpn.BlockProcessor, err = block.NewShardProcessor(arguments) @@ -788,7 +921,7 @@ func (tpn *TestProcessorNode) initNode() { tpn.Node, err = node.NewNode( node.WithMessenger(tpn.Messenger), - node.WithMarshalizer(TestMarshalizer), + node.WithMarshalizer(TestMarshalizer, 100), node.WithHasher(TestHasher), node.WithHasher(TestHasher), node.WithAddressConverter(TestAddressConverter), @@ -849,16 +982,12 @@ func (tpn *TestProcessorNode) SendTransaction(tx *dataTransaction.Transaction) ( } func (tpn *TestProcessorNode) addHandlersForCounters() { - metaHandlers := func(key []byte) { - atomic.AddInt32(&tpn.CounterMetaRcv, 1) - } - hdrHandlers := func(key []byte) { + hdrHandlers := func(header data.HeaderHandler, key []byte) { atomic.AddInt32(&tpn.CounterHdrRecv, 1) } if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - tpn.MetaDataPool.ShardHeaders().RegisterHandler(hdrHandlers) - tpn.MetaDataPool.MetaBlocks().RegisterHandler(metaHandlers) + tpn.MetaDataPool.Headers().RegisterHandler(hdrHandlers) } else { txHandler := func(key []byte) { atomic.AddInt32(&tpn.CounterTxRecv, 1) @@ -871,7 +1000,6 @@ func (tpn *TestProcessorNode) addHandlersForCounters() { tpn.ShardDataPool.Transactions().RegisterHandler(txHandler) tpn.ShardDataPool.RewardTransactions().RegisterHandler(txHandler) tpn.ShardDataPool.Headers().RegisterHandler(hdrHandlers) - tpn.ShardDataPool.MetaBlocks().RegisterHandler(metaHandlers) tpn.ShardDataPool.MiniBlocks().RegisterHandler(mbHandlers) } } @@ -895,7 +1023,7 @@ func (tpn *TestProcessorNode) LoadTxSignSkBytes(skBytes []byte) { // ProposeBlock proposes a new block func (tpn *TestProcessorNode) ProposeBlock(round uint64, nonce uint64) (data.BodyHandler, data.HeaderHandler, [][]byte) { startTime := time.Now() - maxTime := time.Second * 200000 + maxTime := time.Second * 2 haveTime := func() bool { elapsedTime := time.Since(startTime) @@ -905,6 +1033,7 @@ func (tpn *TestProcessorNode) ProposeBlock(round uint64, nonce 
uint64) (data.Bod blockHeader := tpn.BlockProcessor.CreateNewHeader() + blockHeader.SetShardID(tpn.ShardCoordinator.SelfId()) blockHeader.SetRound(round) blockHeader.SetNonce(nonce) blockHeader.SetPubKeysBitmap([]byte{1}) @@ -927,7 +1056,7 @@ func (tpn *TestProcessorNode) ProposeBlock(round uint64, nonce uint64) (data.Bod fmt.Println(err.Error()) return nil, nil, nil } - err = tpn.BlockProcessor.ApplyBodyToHeader(blockHeader, blockBody) + blockBody, err = tpn.BlockProcessor.ApplyBodyToHeader(blockHeader, blockBody) if err != nil { fmt.Println(err.Error()) return nil, nil, nil @@ -953,7 +1082,6 @@ func (tpn *TestProcessorNode) ProposeBlock(round uint64, nonce uint64) (data.Bod // BroadcastBlock broadcasts the block and body to the connected peers func (tpn *TestProcessorNode) BroadcastBlock(body data.BodyHandler, header data.HeaderHandler) { _ = tpn.BroadcastMessenger.BroadcastBlock(body, header) - _ = tpn.BroadcastMessenger.BroadcastShardHeader(header) miniBlocks, transactions, _ := tpn.BlockProcessor.MarshalizedDataToBroadcast(header, body) _ = tpn.BroadcastMessenger.BroadcastMiniBlocks(miniBlocks) _ = tpn.BroadcastMessenger.BroadcastTransactions(transactions) @@ -966,29 +1094,21 @@ func (tpn *TestProcessorNode) CommitBlock(body data.BodyHandler, header data.Hea // GetShardHeader returns the first *dataBlock.Header stored in datapools having the nonce provided as parameter func (tpn *TestProcessorNode) GetShardHeader(nonce uint64) (*dataBlock.Header, error) { - invalidCachers := tpn.ShardDataPool == nil || tpn.ShardDataPool.Headers() == nil || tpn.ShardDataPool.HeadersNonces() == nil + invalidCachers := tpn.ShardDataPool == nil || tpn.ShardDataPool.Headers() == nil if invalidCachers { return nil, errors.New("invalid data pool") } - syncMapHashNonce, ok := tpn.ShardDataPool.HeadersNonces().Get(nonce) - if !ok { - return nil, errors.New(fmt.Sprintf("no hash-nonce link in HeadersNonces for nonce %d", nonce)) - } - - headerHash, ok := syncMapHashNonce.Load(tpn.ShardCoordinator.SelfId()) - if !ok { - return nil, errors.New(fmt.Sprintf("no hash-nonce hash in HeadersNonces for nonce %d", nonce)) + headerObjects, _, err := tpn.ShardDataPool.Headers().GetHeadersByNonceAndShardId(nonce, tpn.ShardCoordinator.SelfId()) + if err != nil { + return nil, errors.New(fmt.Sprintf("no headers found for nonce and shard id %d %d %s", nonce, tpn.ShardCoordinator.SelfId(), err.Error())) } - headerObject, ok := tpn.ShardDataPool.Headers().Get(headerHash) - if !ok { - return nil, errors.New(fmt.Sprintf("no header found for hash %s", hex.EncodeToString(headerHash))) - } + headerObject := headerObjects[len(headerObjects)-1] header, ok := headerObject.(*dataBlock.Header) if !ok { - return nil, errors.New(fmt.Sprintf("not a *dataBlock.Header stored in headers found for hash %s", hex.EncodeToString(headerHash))) + return nil, errors.New(fmt.Sprintf("not a *dataBlock.Header stored in headers found for nonce and shard id %d %d", nonce, tpn.ShardCoordinator.SelfId())) } return header, nil @@ -1050,29 +1170,21 @@ func (tpn *TestProcessorNode) GetMetaBlockBody(header *dataBlock.MetaBlock) (dat // GetMetaHeader returns the first *dataBlock.MetaBlock stored in datapools having the nonce provided as parameter func (tpn *TestProcessorNode) GetMetaHeader(nonce uint64) (*dataBlock.MetaBlock, error) { - invalidCachers := tpn.MetaDataPool == nil || tpn.MetaDataPool.MetaBlocks() == nil || tpn.MetaDataPool.HeadersNonces() == nil + invalidCachers := tpn.MetaDataPool == nil || tpn.MetaDataPool.Headers() == nil if invalidCachers { 
return nil, errors.New("invalid data pool") } - syncMapHashNonce, ok := tpn.MetaDataPool.HeadersNonces().Get(nonce) - if !ok { - return nil, errors.New(fmt.Sprintf("no hash-nonce link in HeadersNonces for nonce %d", nonce)) - } - - headerHash, ok := syncMapHashNonce.Load(tpn.ShardCoordinator.SelfId()) - if !ok { - return nil, errors.New(fmt.Sprintf("no hash-nonce hash in HeadersNonces for nonce %d", nonce)) + headerObjects, _, err := tpn.MetaDataPool.Headers().GetHeadersByNonceAndShardId(nonce, sharding.MetachainShardId) + if err != nil { + return nil, errors.New(fmt.Sprintf("no headers found for nonce and shard id %d %d %s", nonce, sharding.MetachainShardId, err.Error())) } - headerObject, ok := tpn.MetaDataPool.MetaBlocks().Get(headerHash) - if !ok { - return nil, errors.New(fmt.Sprintf("no header found for hash %s", hex.EncodeToString(headerHash))) - } + headerObject := headerObjects[len(headerObjects)-1] header, ok := headerObject.(*dataBlock.MetaBlock) if !ok { - return nil, errors.New(fmt.Sprintf("not a *dataBlock.MetaBlock stored in headers found for hash %s", hex.EncodeToString(headerHash))) + return nil, errors.New(fmt.Sprintf("not a *dataBlock.MetaBlock stored in headers found for nonce and shard id %d %d", nonce, sharding.MetachainShardId)) } return header, nil @@ -1134,7 +1246,7 @@ func (tpn *TestProcessorNode) syncMetaNode(nonce uint64) error { header, body, func() time.Duration { - return time.Second * 2000 + return time.Second * 2 }, ) if err != nil { @@ -1185,3 +1297,32 @@ func (tpn *TestProcessorNode) initRounder() { func (tpn *TestProcessorNode) initRequestedItemsHandler() { tpn.RequestedItemsHandler = timecache.NewTimeCache(roundDuration) } + +func (tpn *TestProcessorNode) initBlockTracker(headerValidator process.HeaderConstructionValidator) { + argBaseTracker := track.ArgBaseTracker{ + Hasher: TestHasher, + HeaderValidator: headerValidator, + Marshalizer: TestMarshalizer, + RequestHandler: tpn.RequestHandler, + Rounder: tpn.Rounder, + ShardCoordinator: tpn.ShardCoordinator, + Store: tpn.Storage, + StartHeaders: tpn.GenesisBlocks, + } + + if tpn.ShardCoordinator.SelfId() != sharding.MetachainShardId { + arguments := track.ArgShardTracker{ + ArgBaseTracker: argBaseTracker, + PoolsHolder: tpn.ShardDataPool, + } + + tpn.BlockTracker, _ = track.NewShardBlockTrack(arguments) + } else { + arguments := track.ArgMetaTracker{ + ArgBaseTracker: argBaseTracker, + PoolsHolder: tpn.MetaDataPool, + } + + tpn.BlockTracker, _ = track.NewMetaBlockTrack(arguments) + } +} diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go index ce074c7aec9..ec49fccada0 100644 --- a/integrationTests/testProcessorNodeWithMultisigner.go +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -43,8 +43,8 @@ func NewTestProcessorNodeWithCustomNodesCoordinator( HeaderSigVerifier: headerSigVerifier, ChainID: IntegrationTestsChainID, } - tpn.NodeKeys = cp.Keys[nodeShardId][keyIndex] + tpn.NodeKeys = cp.Keys[nodeShardId][keyIndex] llsig := &kmultisig.KyberMultiSignerBLS{} blsHasher := blake2b.Blake2b{HashSize: factory.BlsHashSize} @@ -125,6 +125,65 @@ func CreateNodesWithNodesCoordinator( return nodesMap } +// CreateNodesWithNodesCoordinatorAndHeaderSigVerifier returns a map with nodes per shard each using a real nodes coordinator and header sig verifier +func CreateNodesWithNodesCoordinatorAndHeaderSigVerifier( + nodesPerShard int, + nbMetaNodes int, + nbShards int, + shardConsensusGroupSize int, + metaConsensusGroupSize int, + seedAddress 
string, + signer crypto.SingleSigner, + keyGen crypto.KeyGenerator, +) map[uint32][]*TestProcessorNode { + cp := CreateCryptoParams(nodesPerShard, nbMetaNodes, uint32(nbShards)) + pubKeys := PubKeysMapFromKeysMap(cp.Keys) + validatorsMap := GenValidatorsFromPubKeys(pubKeys, uint32(nbShards)) + nodesMap := make(map[uint32][]*TestProcessorNode) + for shardId, validatorList := range validatorsMap { + argumentsNodesCoordinator := sharding.ArgNodesCoordinator{ + ShardConsensusGroupSize: shardConsensusGroupSize, + MetaConsensusGroupSize: metaConsensusGroupSize, + Hasher: TestHasher, + ShardId: shardId, + NbShards: uint32(nbShards), + Nodes: validatorsMap, + SelfPublicKey: []byte(strconv.Itoa(int(shardId))), + } + nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator(argumentsNodesCoordinator) + + if err != nil { + fmt.Println("Error creating node coordinator") + } + + nodesList := make([]*TestProcessorNode, len(validatorList)) + args := headerCheck.ArgsHeaderSigVerifier{ + Marshalizer: TestMarshalizer, + Hasher: TestHasher, + NodesCoordinator: nodesCoordinator, + MultiSigVerifier: TestMultiSig, + SingleSigVerifier: signer, + KeyGen: keyGen, + } + headerSig, _ := headerCheck.NewHeaderSigVerifier(&args) + for i := range validatorList { + nodesList[i] = NewTestProcessorNodeWithCustomNodesCoordinator( + uint32(nbShards), + shardId, + seedAddress, + nodesCoordinator, + cp, + i, + nil, + headerSig, + ) + } + nodesMap[shardId] = nodesList + } + + return nodesMap +} + // CreateNodesWithNodesCoordinatorKeygenAndSingleSigner returns a map with nodes per shard each using a real nodes coordinator // and a given single signer for blocks and a given key gen for blocks func CreateNodesWithNodesCoordinatorKeygenAndSingleSigner( @@ -205,15 +264,11 @@ func ProposeBlockWithConsensusSignature( fmt.Println("Error getting the validators public keys: ", err) } - // set the consensus reward addresses - for _, node := range nodesMap[shardId] { - node.BlockProcessor.SetConsensusData(randomness, round, 0, shardId) - } - + // set some randomness consensusNodes := selectTestNodesForPubKeys(nodesMap[shardId], pubKeys) // first node is block proposer body, header, txHashes := consensusNodes[0].ProposeBlock(round, nonce) - header.SetPrevRandSeed(randomness) + header = DoConsensusSigningOnBlock(header, consensusNodes, pubKeys) return body, header, txHashes, consensusNodes diff --git a/integrationTests/testProcessorNodeWithStateCheckpointModulus.go b/integrationTests/testProcessorNodeWithStateCheckpointModulus.go new file mode 100644 index 00000000000..3c2583a340e --- /dev/null +++ b/integrationTests/testProcessorNodeWithStateCheckpointModulus.go @@ -0,0 +1,100 @@ +package integrationTests + +import ( + "context" + "math/big" + + "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" + "github.com/ElrondNetwork/elrond-go/data/state/factory" + "github.com/ElrondNetwork/elrond-go/integrationTests/mock" + "github.com/ElrondNetwork/elrond-go/process/smartContract" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// NewTestProcessorNodeWithStateCheckpointModulus creates a new testNodeProcessor with custom state checkpoint modulus +func NewTestProcessorNodeWithStateCheckpointModulus( + maxShards uint32, + nodeShardId uint32, + txSignPrivKeyShardId uint32, + initialNodeAddr string, + stateCheckpointModulus uint, +) *TestProcessorNode { + + shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) + + kg := &mock.KeyGenMock{} + sk, pk := kg.GeneratePair() + + pkBytes := make([]byte, 128) + 
address := make([]byte, 32) + nodesCoordinator := &mock.NodesCoordinatorMock{ + ComputeValidatorsGroupCalled: func(randomness []byte, round uint64, shardId uint32) (validators []sharding.Validator, err error) { + v, _ := sharding.NewValidator(big.NewInt(0), 1, pkBytes, address) + return []sharding.Validator{v}, nil + }, + } + + messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) + tpn := &TestProcessorNode{ + ShardCoordinator: shardCoordinator, + Messenger: messenger, + NodesCoordinator: nodesCoordinator, + HeaderSigVerifier: &mock.HeaderSigVerifierStub{}, + ChainID: IntegrationTestsChainID, + } + + tpn.NodeKeys = &TestKeyPair{ + Sk: sk, + Pk: pk, + } + tpn.MultiSigner = TestMultiSig + tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) + tpn.initDataPools() + tpn.SpecialAddressHandler = mock.NewSpecialAddressHandlerMock( + TestAddressConverter, + tpn.ShardCoordinator, + tpn.NodesCoordinator, + ) + tpn.initRounder() + tpn.initStorage() + tpn.AccntState, tpn.StateTrie, _ = CreateAccountsDB(factory.UserAccount) + tpn.PeerState, _, _ = CreateAccountsDB(factory.ValidatorAccount) + tpn.initChainHandler() + tpn.initEconomicsData() + tpn.initInterceptors() + tpn.initRequestedItemsHandler() + tpn.initResolvers() + tpn.initInnerProcessors() + tpn.SCQueryService, _ = smartContract.NewSCQueryService(tpn.VMContainer, tpn.EconomicsData.MaxGasLimitPerBlock()) + tpn.initValidatorStatistics() + rootHash, _ := tpn.ValidatorStatisticsProcessor.RootHash() + tpn.GenesisBlocks = CreateGenesisBlocks( + tpn.AccntState, + TestAddressConverter, + &sharding.NodesSetup{}, + tpn.ShardCoordinator, + tpn.Storage, + tpn.BlockChain, + TestMarshalizer, + TestHasher, + TestUint64Converter, + tpn.MetaDataPool, + tpn.EconomicsData.EconomicsData, + rootHash, + ) + tpn.initBlockProcessor(stateCheckpointModulus) + tpn.BroadcastMessenger, _ = sposFactory.GetBroadcastMessenger( + TestMarshalizer, + tpn.Messenger, + tpn.ShardCoordinator, + tpn.OwnAccount.SkTxSign, + tpn.OwnAccount.SingleSigner, + ) + tpn.setGenesisBlock() + tpn.initNode() + tpn.SCQueryService, _ = smartContract.NewSCQueryService(tpn.VMContainer, tpn.EconomicsData.MaxGasLimitPerBlock()) + tpn.addHandlersForCounters() + tpn.addGenesisBlocksIntoStorage() + + return tpn +} diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 3572f187853..7f0be63e0e5 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -6,6 +6,7 @@ import ( "math/big" "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" + "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" "github.com/ElrondNetwork/elrond-go/process/block" @@ -45,6 +46,7 @@ func NewTestSyncNode( StorageBootstrapper: &mock.StorageBootstrapperMock{}, HeaderSigVerifier: &mock.HeaderSigVerifierStub{}, ChainID: IntegrationTestsChainID, + EpochStartTrigger: &mock.EpochStartTriggerStub{}, } kg := &mock.KeyGenMock{} @@ -65,7 +67,7 @@ func NewTestSyncNode( func (tpn *TestProcessorNode) initTestNodeWithSync() { tpn.initRounder() tpn.initStorage() - tpn.AccntState, _, _ = CreateAccountsDB(0) + tpn.AccntState, tpn.StateTrie, _ = CreateAccountsDB(0) tpn.initChainHandler() tpn.GenesisBlocks = CreateSimpleGenesisBlocks(tpn.ShardCoordinator) tpn.SpecialAddressHandler = mock.NewSpecialAddressHandlerMock( @@ -112,6 +114,14 @@ func (tpn *TestProcessorNode) addGenesisBlocksIntoStorage() { func (tpn *TestProcessorNode) 
initBlockProcessorWithSync() { var err error + argsHeaderValidator := block.ArgsHeaderValidator{ + Hasher: TestHasher, + Marshalizer: TestMarshalizer, + } + headerValidator, _ := block.NewHeaderValidator(argsHeaderValidator) + + tpn.initBlockTracker(headerValidator) + argumentsBase := block.ArgBaseProcessor{ Accounts: tpn.AccntState, ForkDetector: nil, @@ -122,21 +132,23 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { NodesCoordinator: tpn.NodesCoordinator, SpecialAddressHandler: tpn.SpecialAddressHandler, Uint64Converter: TestUint64Converter, - StartHeaders: tpn.GenesisBlocks, RequestHandler: tpn.RequestHandler, Core: nil, BlockChainHook: &mock.BlockChainHookHandlerMock{}, ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorMock{}, + EpochStartTrigger: &mock.EpochStartTriggerStub{}, + HeaderValidator: headerValidator, Rounder: &mock.RounderMock{}, BootStorer: &mock.BoostrapStorerMock{ PutCalled: func(round int64, bootData bootstrapStorage.BootstrapData) error { return nil }, }, + BlockTracker: tpn.BlockTracker, } if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - tpn.ForkDetector, _ = sync.NewMetaForkDetector(tpn.Rounder, tpn.BlackListHandler, 0) + tpn.ForkDetector, _ = sync.NewMetaForkDetector(tpn.Rounder, tpn.BlackListHandler, tpn.BlockTracker, 0) argumentsBase.Core = &mock.ServiceContainerMock{} argumentsBase.ForkDetector = tpn.ForkDetector argumentsBase.TxCoordinator = &mock.TransactionCoordinatorMock{} @@ -146,19 +158,21 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { SCDataGetter: &mock.ScQueryMock{}, SCToProtocol: &mock.SCToProtocolStub{}, PeerChangesHandler: &mock.PeerChangesHandler{}, + PendingMiniBlocks: &mock.PendingMiniBlocksHandlerStub{}, } tpn.BlockProcessor, err = block.NewMetaProcessor(arguments) } else { - tpn.ForkDetector, _ = sync.NewShardForkDetector(tpn.Rounder, tpn.BlackListHandler, 0) + tpn.ForkDetector, _ = sync.NewShardForkDetector(tpn.Rounder, tpn.BlackListHandler, tpn.BlockTracker, 0) argumentsBase.ForkDetector = tpn.ForkDetector argumentsBase.BlockChainHook = tpn.BlockchainHook argumentsBase.TxCoordinator = tpn.TxCoordinator arguments := block.ArgShardProcessor{ - ArgBaseProcessor: argumentsBase, - DataPool: tpn.ShardDataPool, - TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, + ArgBaseProcessor: argumentsBase, + DataPool: tpn.ShardDataPool, + TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, + StateCheckpointModulus: stateCheckpointModulus, } tpn.BlockProcessor, err = block.NewShardProcessor(arguments) @@ -170,6 +184,11 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { } func (tpn *TestProcessorNode) createShardBootstrapper() (TestBootstrapper, error) { + accountsStateWrapper, err := state.NewAccountsDbWrapperSync(tpn.AccntState) + if err != nil { + return nil, err + } + bootstrap, err := sync.NewShardBootstrap( tpn.ShardDataPool, tpn.Storage, @@ -182,7 +201,7 @@ func (tpn *TestProcessorNode) createShardBootstrapper() (TestBootstrapper, error tpn.ForkDetector, tpn.ResolverFinder, tpn.ShardCoordinator, - tpn.AccntState, + accountsStateWrapper, tpn.BlackListHandler, tpn.Messenger, tpn.BootstrapStorer, @@ -217,6 +236,7 @@ func (tpn *TestProcessorNode) createMetaChainBootstrapper() (TestBootstrapper, e tpn.BootstrapStorer, tpn.StorageBootstrapper, tpn.RequestedItemsHandler, + tpn.EpochStartTrigger, ) if err != nil { diff --git a/integrationTests/vm/arwen/arwenVM_test.go b/integrationTests/vm/arwen/arwenVM_test.go index dfed6e7b9a5..fe39f7b4105 100644 --- a/integrationTests/vm/arwen/arwenVM_test.go +++ 
b/integrationTests/vm/arwen/arwenVM_test.go @@ -168,9 +168,8 @@ func runWASMVMBenchmark( SndAddr: ownerAddressBytes, GasPrice: gasPrice, GasLimit: gasLimit, - Data: scCodeString + "@" + hex.EncodeToString(factory.ArwenVirtualMachine), + Data: []byte(scCodeString + "@" + hex.EncodeToString(factory.ArwenVirtualMachine)), Signature: nil, - Challenge: nil, } txProc, accnts, blockchainHook := vm.CreateTxProcessorArwenVMWithGasSchedule(tb, ownerNonce, ownerAddressBytes, ownerBalance, gasSchedule) @@ -193,9 +192,8 @@ func runWASMVMBenchmark( SndAddr: alice, GasPrice: 0, GasLimit: gasLimit, - Data: "_main", + Data: []byte("_main"), Signature: nil, - Challenge: nil, } for i := 0; i < numRun; i++ { @@ -258,7 +256,7 @@ func deployWithTransferAndExecuteERC20(t *testing.T, numRun int, gasSchedule map transferOnCalls, gasPrice, gasLimit, - scCodeString+"@"+hex.EncodeToString(factory.ArwenVirtualMachine), + []byte(scCodeString+"@"+hex.EncodeToString(factory.ArwenVirtualMachine)), ) err = txProc.ProcessTransaction(tx) @@ -330,9 +328,8 @@ func TestWASMNamespacing(t *testing.T) { SndAddr: ownerAddressBytes, GasPrice: gasPrice, GasLimit: gasLimit, - Data: scCodeString + "@" + hex.EncodeToString(factory.ArwenVirtualMachine), + Data: []byte(scCodeString + "@" + hex.EncodeToString(factory.ArwenVirtualMachine)), Signature: nil, - Challenge: nil, } txProc, accnts, blockchainHook := vm.CreatePreparedTxProcessorAndAccountsWithVMs(t, ownerNonce, ownerAddressBytes, ownerBalance) @@ -360,9 +357,8 @@ func TestWASMNamespacing(t *testing.T) { SndAddr: alice, GasPrice: gasPrice, GasLimit: gasLimit, - Data: "main", + Data: []byte("main"), Signature: nil, - Challenge: nil, } err = txProc.ProcessTransaction(tx) @@ -390,9 +386,8 @@ func TestWASMMetering(t *testing.T) { SndAddr: ownerAddressBytes, GasPrice: gasPrice, GasLimit: gasLimit, - Data: scCodeString + "@" + hex.EncodeToString(factory.ArwenVirtualMachine), + Data: []byte(scCodeString + "@" + hex.EncodeToString(factory.ArwenVirtualMachine)), Signature: nil, - Challenge: nil, } txProc, accnts, blockchainHook := vm.CreatePreparedTxProcessorAndAccountsWithVMs(t, ownerNonce, ownerAddressBytes, ownerBalance) @@ -420,9 +415,8 @@ func TestWASMMetering(t *testing.T) { SndAddr: alice, GasPrice: gasPrice, GasLimit: gasLimit, - Data: "_main", + Data: []byte("_main"), Signature: nil, - Challenge: nil, } err = txProc.ProcessTransaction(tx) @@ -446,6 +440,10 @@ func TestWASMMetering(t *testing.T) { } func TestMultipleTimesERC20BigIntInBatches(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + for i := 0; i < 10; i++ { deployAndExecuteERC20WithBigInt(t, 1000, nil) } @@ -472,7 +470,7 @@ func deployAndExecuteERC20WithBigInt(t *testing.T, numRun int, gasSchedule map[s transferOnCalls, gasPrice, gasLimit, - scCodeString+"@"+hex.EncodeToString(factory.ArwenVirtualMachine)+"@"+hex.EncodeToString(ownerBalance.Bytes()), + []byte(scCodeString+"@"+hex.EncodeToString(factory.ArwenVirtualMachine)+"@"+hex.EncodeToString(ownerBalance.Bytes())), ) err = txProc.ProcessTransaction(tx) @@ -560,7 +558,7 @@ func TestJurnalizingAndTimeToProcessChange(t *testing.T) { transferOnCalls, gasPrice, gasLimit, - scCodeString+"@"+hex.EncodeToString(factory.ArwenVirtualMachine)+"@"+hex.EncodeToString(ownerBalance.Bytes()), + []byte(scCodeString+"@"+hex.EncodeToString(factory.ArwenVirtualMachine)+"@"+hex.EncodeToString(ownerBalance.Bytes())), ) err = txProc.ProcessTransaction(tx) diff --git a/integrationTests/vm/arwen/utils.go b/integrationTests/vm/arwen/utils.go index 
9f13b580494..5a893f7a1a7 100644 --- a/integrationTests/vm/arwen/utils.go +++ b/integrationTests/vm/arwen/utils.go @@ -5,6 +5,7 @@ import ( "io/ioutil" "math" "math/big" + "path/filepath" "testing" "github.com/ElrondNetwork/elrond-go/core" @@ -115,7 +116,7 @@ func (context *testContext) deploySC(wasmPath string, parametersString string) { SndAddr: owner.Address, GasPrice: 1, GasLimit: math.MaxInt32, - Data: txData, + Data: []byte(txData), } err := context.TxProcessor.ProcessTransaction(tx) @@ -132,7 +133,7 @@ func (context *testContext) deploySC(wasmPath string, parametersString string) { } func getSCCode(fileName string) string { - code, _ := ioutil.ReadFile(fileName) + code, _ := ioutil.ReadFile(filepath.Clean(fileName)) codeEncoded := hex.EncodeToString(code) return codeEncoded @@ -150,7 +151,7 @@ func (context *testContext) executeSCWithValue(sender *testParticipant, txData s SndAddr: sender.Address, GasPrice: 1, GasLimit: math.MaxInt32, - Data: txData, + Data: []byte(txData), } err := context.TxProcessor.ProcessTransaction(tx) diff --git a/integrationTests/vm/iele/vmAgar_test.go b/integrationTests/vm/iele/vmAgar_test.go index ee5b7df2984..7dd51041dc7 100644 --- a/integrationTests/vm/iele/vmAgar_test.go +++ b/integrationTests/vm/iele/vmAgar_test.go @@ -189,6 +189,10 @@ func TestAgarioContractTopUpAnfWithdrawShouldWork(t *testing.T) { } func TestAgarioContractJoinGameReward(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + scCode, err := ioutil.ReadFile(agarioFile) assert.Nil(t, err) @@ -308,7 +312,7 @@ func TestAgarioContractJoinGameReward(t *testing.T) { computedBalance := big.NewInt(0).Set(afterJoinUsersBalances[i]) computedBalance.Add(computedBalance, prize) - assert.Equal(t, computedBalance, existingUserBalance) + assert.Equal(t, computedBalance.Uint64(), existingUserBalance.Uint64()) } transferredBack := big.NewInt(0).Set(prize) @@ -318,7 +322,7 @@ func TestAgarioContractJoinGameReward(t *testing.T) { computedBalance.Sub(computedBalance, transferredBack) balanceOfSC, _ = blockchainHook.GetBalance(scAddressBytes) fmt.Printf("balance of SC: %v\n", balanceOfSC) - assert.Equal(t, computedBalance, balanceOfSC) + assert.Equal(t, computedBalance.Uint64(), balanceOfSC.Uint64()) } func BenchmarkAgarioJoinGame(b *testing.B) { diff --git a/integrationTests/vm/iele/vmDeploy_test.go b/integrationTests/vm/iele/vmDeploy_test.go index b4ca1d62aaf..42a98eb8783 100644 --- a/integrationTests/vm/iele/vmDeploy_test.go +++ b/integrationTests/vm/iele/vmDeploy_test.go @@ -30,7 +30,7 @@ func TestVMInvalidSmartContractCodeShouldNotGenerateAccount(t *testing.T) { Value: big.NewInt(0), SndAddr: senderAddressBytes, RcvAddr: vm.CreateEmptyAddress().Bytes(), - Data: string(scCode) + "@" + hex.EncodeToString(factory.IELEVirtualMachine), + Data: []byte(string(scCode) + "@" + hex.EncodeToString(factory.IELEVirtualMachine)), GasPrice: gasPrice, GasLimit: gasLimit, } diff --git a/integrationTests/vm/systemVM/systemVm_test.go b/integrationTests/vm/systemVM/systemVm_test.go index 465a6900c3c..3d17e0f68a0 100644 --- a/integrationTests/vm/systemVM/systemVm_test.go +++ b/integrationTests/vm/systemVM/systemVm_test.go @@ -10,7 +10,6 @@ import ( "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/integrationTests" - "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/vm/factory" "github.com/stretchr/testify/assert" ) @@ -20,8 +19,6 @@ func TestStakingUnstakingAndUnboundingOnMultiShardEnvironment(t *testing.T) { t.Skip("this is not a 
short test") } - _ = logger.SetLogLevel("*:INFO,*:DEBUG") - numOfShards := 2 nodesPerShard := 3 numMetachainNodes := 3 @@ -115,6 +112,114 @@ func TestStakingUnstakingAndUnboundingOnMultiShardEnvironment(t *testing.T) { verifyUnbound(t, nodes, initialVal, consumedBalance) } +func TestStakingUnstakingAndUnboundingOnMultiShardEnvironmentWithValidatorStatistics(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numOfShards := 2 + nodesPerShard := 3 + numMetachainNodes := 3 + shardConsensusGroupSize := 2 + metaConsensusGroupSize := 3 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( + nodesPerShard, + numMetachainNodes, + numOfShards, + shardConsensusGroupSize, + metaConsensusGroupSize, + integrationTests.GetConnectableAddress(advertiser), + ) + + nodes := make([]*integrationTests.TestProcessorNode, 0) + + for _, nds := range nodesMap { + nodes = append(nodes, nds...) + } + + idxProposers := make([]int, numOfShards+1) + for i := 0; i < numOfShards; i++ { + idxProposers[i] = i * nodesPerShard + } + idxProposers[numOfShards] = numOfShards * nodesPerShard + + integrationTests.DisplayAndStartNodes(nodes) + + defer func() { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Node.Stop() + } + }() + + initialVal := big.NewInt(10000000000) + integrationTests.MintAllNodes(nodes, initialVal) + + verifyInitialBalance(t, nodes, initialVal) + + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + nonce++ + + ///////////------- send stake tx and check sender's balance + var txData string + for index, node := range nodes { + pubKey := generateUniqueKey(index) + txData = "stake" + "@" + pubKey + integrationTests.CreateAndSendTransaction(node, node.EconomicsData.StakeValue(), factory.StakingSCAddress, txData) + } + + time.Sleep(time.Second) + + nrRoundsToPropagateMultiShard := 10 + nonce, round = waitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) + + time.Sleep(time.Second) + + consumedBalance := big.NewInt(0).Add(big.NewInt(int64(len(txData))), big.NewInt(0).SetUint64(integrationTests.MinTxGasLimit)) + consumedBalance.Mul(consumedBalance, big.NewInt(0).SetUint64(integrationTests.MinTxGasPrice)) + + checkAccountsAfterStaking(t, nodes, initialVal, consumedBalance) + + /////////------ send unStake tx + for index, node := range nodes { + pubKey := generateUniqueKey(index) + txData = "unStake" + "@" + pubKey + integrationTests.CreateAndSendTransaction(node, big.NewInt(0), factory.StakingSCAddress, txData) + } + consumed := big.NewInt(0).Add(big.NewInt(0).SetUint64(integrationTests.MinTxGasLimit), big.NewInt(int64(len(txData)))) + consumed.Mul(consumed, big.NewInt(0).SetUint64(integrationTests.MinTxGasPrice)) + consumedBalance.Add(consumedBalance, consumed) + + time.Sleep(time.Second) + + nonce, round = waitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) + + /////////----- wait for unbound period + nonce, round = waitOperationToBeDone(t, nodes, int(nodes[0].EconomicsData.UnBoundPeriod()), nonce, round, idxProposers) + + ////////----- send unBound + for index, node := range nodes { + pubKey := generateUniqueKey(index) + txData = "unBound" + "@" + pubKey + integrationTests.CreateAndSendTransaction(node, big.NewInt(0), factory.StakingSCAddress, txData) + } + consumed = 
big.NewInt(0).Add(big.NewInt(0).SetUint64(integrationTests.MinTxGasLimit), big.NewInt(int64(len(txData)))) + consumed.Mul(consumed, big.NewInt(0).SetUint64(integrationTests.MinTxGasPrice)) + consumedBalance.Add(consumedBalance, consumed) + + time.Sleep(time.Second) + + _, _ = waitOperationToBeDone(t, nodes, nrRoundsToPropagateMultiShard, nonce, round, idxProposers) + + verifyUnbound(t, nodes, initialVal, consumedBalance) +} + func verifyUnbound(t *testing.T, nodes []*integrationTests.TestProcessorNode, initialVal, consumedBalance *big.Int) { for _, node := range nodes { accShardId := node.ShardCoordinator.ComputeId(node.OwnAccount.Address) @@ -183,5 +288,5 @@ func getAccountFromAddrBytes(accState state.AccountsAdapter, address []byte) *st func generateUniqueKey(identifier int) string { neededLength := 256 uniqueIdentifier := fmt.Sprintf("%d", identifier) - return strings.Repeat("0", neededLength - len(uniqueIdentifier)) + uniqueIdentifier + return strings.Repeat("0", neededLength-len(uniqueIdentifier)) + uniqueIdentifier } diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index 02e9ffaf159..366da079b3c 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -9,10 +9,12 @@ import ( "testing" arwenConfig "github.com/ElrondNetwork/arwen-wasm-vm/config" + "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" dataTransaction "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/data/trie" + "github.com/ElrondNetwork/elrond-go/data/trie/evictionWaitingList" "github.com/ElrondNetwork/elrond-go/hashing/sha256" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" "github.com/ElrondNetwork/elrond-go/marshal" @@ -59,17 +61,18 @@ func CreateEmptyAddress() state.AddressContainer { func CreateMemUnit() storage.Storer { cache, _ := storageUnit.NewCache(storageUnit.LRUCache, 10, 1) - persist, _ := memorydb.New() - unit, _ := storageUnit.NewStorageUnit(cache, persist) + unit, _ := storageUnit.NewStorageUnit(cache, memorydb.New()) return unit } func CreateInMemoryShardAccountsDB() *state.AccountsDB { marsh := &marshal.JsonMarshalizer{} store := CreateMemUnit() + ewl, _ := evictionWaitingList.NewEvictionWaitingList(100, memorydb.New(), marsh) + trieStorage, _ := trie.NewTrieStorageManager(store, &config.DBConfig{}, ewl) - tr, _ := trie.NewTrie(store, marsh, testHasher) + tr, _ := trie.NewTrie(trieStorage, marsh, testHasher) adb, _ := state.NewAccountsDB(tr, testHasher, marsh, &accountFactory{}) return adb @@ -157,6 +160,8 @@ func CreateTxProcessorWithOneSCExecutorMockVM(accnts state.AccountsAdapter, opGa &mock.UnsignedTxHandlerMock{}, txTypeHandler, &mock.FeeHandlerStub{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) return txProcessor @@ -258,6 +263,8 @@ func CreateTxProcessorWithOneSCExecutorWithVMs( &mock.UnsignedTxHandlerMock{}, txTypeHandler, &mock.FeeHandlerStub{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) return txProcessor @@ -376,7 +383,7 @@ func CreateTx( Value: value, SndAddr: senderAddressBytes, RcvAddr: receiverAddressBytes, - Data: txData, + Data: []byte(txData), GasPrice: gasPrice, GasLimit: gasLimit, } @@ -391,7 +398,7 @@ func CreateDeployTx( value *big.Int, gasPrice uint64, gasLimit uint64, - scCodeAndVMType string, + scCodeAndVMType []byte, ) 
*dataTransaction.Transaction { return &dataTransaction.Transaction{ @@ -465,7 +472,7 @@ func CreateTopUpTx(nonce uint64, value *big.Int, scAddrress []byte, sndAddress [ SndAddr: sndAddress, GasPrice: 0, GasLimit: 5000000, - Data: "topUp@00", + Data: []byte("topUp@00"), } } @@ -483,7 +490,7 @@ func CreateTransferTx( SndAddr: sndAddress, GasPrice: 0, GasLimit: 5000000, - Data: "transfer@" + hex.EncodeToString(rcvAddress) + "@" + hex.EncodeToString(value.Bytes()), + Data: []byte("transfer@" + hex.EncodeToString(rcvAddress) + "@" + hex.EncodeToString(value.Bytes())), } } @@ -501,6 +508,6 @@ func CreateTransferTokenTx( SndAddr: sndAddress, GasPrice: 0, GasLimit: 5000000, - Data: "transferToken@" + hex.EncodeToString(rcvAddress) + "@" + hex.EncodeToString(value.Bytes()), + Data: []byte("transferToken@" + hex.EncodeToString(rcvAddress) + "@" + hex.EncodeToString(value.Bytes())), } } diff --git a/logger/mock/marshalizersMock.go b/logger/mock/marshalizersMock.go index cafdc54d262..a577440758e 100644 --- a/logger/mock/marshalizersMock.go +++ b/logger/mock/marshalizersMock.go @@ -5,7 +5,6 @@ import ( "encoding/json" "errors" - "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/golang/protobuf/proto" ) @@ -23,7 +22,7 @@ type CapnpMarshalizer struct{} func (x *CapnpMarshalizer) Marshal(obj interface{}) ([]byte, error) { out := bytes.NewBuffer(nil) - o := obj.(data.CapnpHelper) + o := obj.(marshal.CapnpHelper) // set the members to capnp struct err := o.Save(out) @@ -37,7 +36,7 @@ func (x *CapnpMarshalizer) Marshal(obj interface{}) ([]byte, error) { func (x *CapnpMarshalizer) Unmarshal(obj interface{}, buff []byte) error { out := bytes.NewBuffer(buff) - o := obj.(data.CapnpHelper) + o := obj.(marshal.CapnpHelper) // set the members to capnp struct err := o.Load(out) diff --git a/data/capnpHelper.go b/marshal/capnpHelper.go similarity index 96% rename from data/capnpHelper.go rename to marshal/capnpHelper.go index 4c92aad2c52..c704a678796 100644 --- a/data/capnpHelper.go +++ b/marshal/capnpHelper.go @@ -1,4 +1,4 @@ -package data +package marshal import ( "io" diff --git a/marshal/capnpMarshalizer.go b/marshal/capnpMarshalizer.go index e02f11d6180..e08d24c5cf3 100644 --- a/marshal/capnpMarshalizer.go +++ b/marshal/capnpMarshalizer.go @@ -2,8 +2,6 @@ package marshal import ( "bytes" - - "github.com/ElrondNetwork/elrond-go/data" ) // CapnpMarshalizer implements marshaling with capnproto @@ -15,7 +13,7 @@ type CapnpMarshalizer struct { func (x *CapnpMarshalizer) Marshal(obj interface{}) ([]byte, error) { out := bytes.NewBuffer(nil) - o := obj.(data.CapnpHelper) + o := obj.(CapnpHelper) // set the members to capnp struct err := o.Save(out) @@ -31,7 +29,7 @@ func (x *CapnpMarshalizer) Marshal(obj interface{}) ([]byte, error) { func (x *CapnpMarshalizer) Unmarshal(obj interface{}, buff []byte) error { out := bytes.NewBuffer(buff) - o := obj.(data.CapnpHelper) + o := obj.(CapnpHelper) // set the members to capnp struct err := o.Load(out) diff --git a/marshal/capnpMarshalizer_test.go b/marshal/capnpMarshalizer_test.go index f91d32fff29..58c12e0289d 100644 --- a/marshal/capnpMarshalizer_test.go +++ b/marshal/capnpMarshalizer_test.go @@ -6,7 +6,6 @@ import ( "math/rand" "testing" - "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/marshal" @@ -16,7 +15,7 @@ import ( type dataGenerator interface { // GenerateDummyArray generates an array of 
data of the implementer type // The implementer needs to implement CapnpHelper as well - GenerateDummyArray() []data.CapnpHelper + GenerateDummyArray() []marshal.CapnpHelper } type Header struct { @@ -55,7 +54,7 @@ func benchMarshal(b *testing.B, m marshal.Marshalizer, obj dataGenerator) { } } -func benchUnmarshal(b *testing.B, m marshal.Marshalizer, obj interface{}, validate bool) { +func benchUnmarshal(b *testing.B, m marshal.Marshalizer, obj interface{}, validate bool, sizeCheck bool) { b.StopTimer() dArray := obj.(dataGenerator).GenerateDummyArray() l := len(dArray) @@ -69,12 +68,16 @@ func benchUnmarshal(b *testing.B, m marshal.Marshalizer, obj interface{}, valida serialized[i] = t } + if sizeCheck { + m = marshal.NewSizeCheckUnmarshalizer(m, 100) + } + b.ReportAllocs() b.StartTimer() for i := 0; i < b.N; i++ { n := i % l - err := m.Unmarshal(obj.(data.CapnpHelper), serialized[n]) + err := m.Unmarshal(obj.(marshal.CapnpHelper), serialized[n]) assert.Nil(b, err) @@ -102,25 +105,37 @@ func BenchmarkJsonTransactionMarshal(b *testing.B) { func BenchmarkCapnprotoTransactionUnmarshalNoValidate(b *testing.B) { tx := &Transaction{} cmr := &marshal.CapnpMarshalizer{} - benchUnmarshal(b, cmr, tx, false) + benchUnmarshal(b, cmr, tx, false, false) +} + +func BenchmarkCapnprotoTransactionUnmarshalNoValidate_SizeCheck(b *testing.B) { + tx := &Transaction{} + cmr := &marshal.CapnpMarshalizer{} + benchUnmarshal(b, cmr, tx, false, true) } func BenchmarkJsonTransactionUnmarshalNoValidate(b *testing.B) { tx := &Transaction{} jmr := &marshal.JsonMarshalizer{} - benchUnmarshal(b, jmr, tx, false) + benchUnmarshal(b, jmr, tx, false, false) +} + +func BenchmarkJsonTransactionUnmarshalNoValidate_SizeCheck(b *testing.B) { + tx := &Transaction{} + jmr := &marshal.JsonMarshalizer{} + benchUnmarshal(b, jmr, tx, false, true) } func BenchmarkCapnprotoTransactionUnmarshalValidate(b *testing.B) { tx := &Transaction{} cmr := &marshal.CapnpMarshalizer{} - benchUnmarshal(b, cmr, tx, true) + benchUnmarshal(b, cmr, tx, true, false) } func BenchmarkJsonTransactionUnmarshalValidate(b *testing.B) { tx := &Transaction{} jmr := &marshal.JsonMarshalizer{} - benchUnmarshal(b, jmr, tx, true) + benchUnmarshal(b, jmr, tx, true, false) } func BenchmarkCapnprotoMiniBlocksMarshal(b *testing.B) { @@ -138,25 +153,37 @@ func BenchmarkJsonMiniBlocksMarshal(b *testing.B) { func BenchmarkCapnprotoMiniBlocksUnmarshalNoValidate(b *testing.B) { bl := &MiniBlock{} cmr := &marshal.CapnpMarshalizer{} - benchUnmarshal(b, cmr, bl, false) + benchUnmarshal(b, cmr, bl, false, false) +} + +func BenchmarkCapnprotoMiniBlocksUnmarshalNoValidate_SizeCheck(b *testing.B) { + bl := &MiniBlock{} + cmr := &marshal.CapnpMarshalizer{} + benchUnmarshal(b, cmr, bl, false, true) } func BenchmarkJsonMiniBlocksUnmarshalNoValidate(b *testing.B) { bl := &MiniBlock{} jmr := &marshal.JsonMarshalizer{} - benchUnmarshal(b, jmr, bl, false) + benchUnmarshal(b, jmr, bl, false, false) +} + +func BenchmarkJsonMiniBlocksUnmarshalNoValidate_SizeCheck(b *testing.B) { + bl := &MiniBlock{} + jmr := &marshal.JsonMarshalizer{} + benchUnmarshal(b, jmr, bl, false, true) } func BenchmarkCapnprotoMiniBlocksUnmarshalValidate(b *testing.B) { bl := &MiniBlock{} cmr := &marshal.CapnpMarshalizer{} - benchUnmarshal(b, cmr, bl, true) + benchUnmarshal(b, cmr, bl, true, false) } func BenchmarkJsonMiniBlocksUnmarshalValidate(b *testing.B) { bl := &MiniBlock{} cmr := &marshal.JsonMarshalizer{} - benchUnmarshal(b, cmr, bl, true) + benchUnmarshal(b, cmr, bl, true, false) } func 
BenchmarkCapnprotoHeaderMarshal(b *testing.B) { @@ -174,30 +201,42 @@ func BenchmarkJsonHeaderMarshal(b *testing.B) { func BenchmarkCapnprotoHeaderUnmarshalNoValidate(b *testing.B) { h := &Header{} cmr := &marshal.CapnpMarshalizer{} - benchUnmarshal(b, cmr, h, false) + benchUnmarshal(b, cmr, h, false, false) +} + +func BenchmarkCapnprotoHeaderUnmarshalNoValidate_SizeCheck(b *testing.B) { + h := &Header{} + cmr := &marshal.CapnpMarshalizer{} + benchUnmarshal(b, cmr, h, false, true) } func BenchmarkJsonHeaderUnmarshalNoValidate(b *testing.B) { h := &Header{} jmr := &marshal.JsonMarshalizer{} - benchUnmarshal(b, jmr, h, false) + benchUnmarshal(b, jmr, h, false, false) +} + +func BenchmarkJsonHeaderUnmarshalNoValidate_SizeCheck(b *testing.B) { + h := &Header{} + jmr := &marshal.JsonMarshalizer{} + benchUnmarshal(b, jmr, h, false, true) } func BenchmarkCapnprotoHeaderUnmarshalValidate(b *testing.B) { h := &Header{} cmr := &marshal.CapnpMarshalizer{} - benchUnmarshal(b, cmr, h, true) + benchUnmarshal(b, cmr, h, true, false) } func BenchmarkJsonHeaderUnmarshalValidate(b *testing.B) { h := &Header{} jmr := &marshal.JsonMarshalizer{} - benchUnmarshal(b, jmr, h, true) + benchUnmarshal(b, jmr, h, true, false) } // GenerateDummyArray is used to generate an array of MiniBlockHeaders with dummy data -func (sBlock *MiniBlock) GenerateDummyArray() []data.CapnpHelper { - sBlocks := make([]data.CapnpHelper, 0, 1000) +func (sBlock *MiniBlock) GenerateDummyArray() []marshal.CapnpHelper { + sBlocks := make([]marshal.CapnpHelper, 0, 1000) for i := 0; i < 1000; i++ { lenTxHashes := rand.Intn(20) + 1 @@ -218,8 +257,8 @@ func (sBlock *MiniBlock) GenerateDummyArray() []data.CapnpHelper { } // GenerateDummyArray is used to generate an array of block headers with dummy data -func (h *Header) GenerateDummyArray() []data.CapnpHelper { - headers := make([]data.CapnpHelper, 0, 1000) +func (h *Header) GenerateDummyArray() []marshal.CapnpHelper { + headers := make([]marshal.CapnpHelper, 0, 1000) mbh := block.MiniBlockHeader{ Hash: []byte("mini block header"), @@ -257,8 +296,8 @@ func (h *Header) GenerateDummyArray() []data.CapnpHelper { } // GenerateDummyArray is used to generate an array of transactions with dummy data -func (tx *Transaction) GenerateDummyArray() []data.CapnpHelper { - transactions := make([]data.CapnpHelper, 0, 1000) +func (tx *Transaction) GenerateDummyArray() []marshal.CapnpHelper { + transactions := make([]marshal.CapnpHelper, 0, 1000) val := big.NewInt(0) _ = val.GobDecode([]byte(RandomStr(32))) @@ -272,9 +311,8 @@ func (tx *Transaction) GenerateDummyArray() []data.CapnpHelper { SndAddr: []byte(RandomStr(32)), GasPrice: uint64(rand.Int63n(10000)), GasLimit: uint64(rand.Int63n(10000)), - Data: RandomStr(32), + Data: []byte(RandomStr(32)), Signature: []byte(RandomStr(32)), - Challenge: []byte(RandomStr(32)), }, }) } diff --git a/marshal/errors.go b/marshal/errors.go index 2462384a58d..ac7296977b3 100644 --- a/marshal/errors.go +++ b/marshal/errors.go @@ -7,3 +7,7 @@ var ErrMarshallingProto = errors.New("can not serialize the object") // ErrUnmarshallingProto is raised when the object that needs to be unmarshaled does not implement proto.Message var ErrUnmarshallingProto = errors.New("obj does not implement proto.Message") + +// ErrUnmarshallingBadSize is raised when the provided serialized data size exceeds the re-serialized data size +// plus an additional provided delta +var ErrUnmarshallingBadSize = errors.New("input buffer too long")
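For context, ErrUnmarshallingBadSize backs the size-check wrapper added just below in marshal/sizeCheckUnmarshalizer.go. A minimal usage sketch follows; it is not part of this patch, the helper name and the 20% delta are illustrative assumptions, and only marshal.NewSizeCheckUnmarshalizer, marshal.JsonMarshalizer and the transaction type come from the repository itself.

package example

import (
	"github.com/ElrondNetwork/elrond-go/data/transaction"
	"github.com/ElrondNetwork/elrond-go/marshal"
)

// decodeWithSizeCheck is a hypothetical helper showing the intended use of the wrapper:
// after a successful Unmarshal it re-marshals the object and rejects the input buffer
// if it is larger than the re-serialized size plus the accepted delta (here 20%).
func decodeWithSizeCheck(buff []byte) (*transaction.Transaction, error) {
	// the node derives the delta from its sizeCheckDelta setting (see the two-argument
	// node.WithMarshalizer calls further down in this diff); 20 is an assumed value
	m := marshal.NewSizeCheckUnmarshalizer(&marshal.JsonMarshalizer{}, 20)

	tx := &transaction.Transaction{}
	if err := m.Unmarshal(tx, buff); err != nil {
		// ErrUnmarshallingBadSize is returned for padded or over-long inputs
		return nil, err
	}
	return tx, nil
}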
diff --git a/marshal/jsonMarshalizer.go b/marshal/jsonMarshalizer.go index 2091861f169..25662e84c72 100644 --- a/marshal/jsonMarshalizer.go +++ b/marshal/jsonMarshalizer.go @@ -12,7 +12,7 @@ type JsonMarshalizer struct { // Marshal tries to serialize obj parameter func (j JsonMarshalizer) Marshal(obj interface{}) ([]byte, error) { if obj == nil { - return nil, errors.New("NIL object to serilize from!") + return nil, errors.New("nil object to serialize from") } return json.Marshal(obj) @@ -21,7 +21,7 @@ func (j JsonMarshalizer) Marshal(obj interface{}) ([]byte, error) { // Unmarshal tries to deserialize input buffer values into input object func (j JsonMarshalizer) Unmarshal(obj interface{}, buff []byte) error { if obj == nil { - return errors.New("nil object to serilize to") + return errors.New("nil object to serialize to") } if buff == nil { return errors.New("nil byte buffer to deserialize from") diff --git a/marshal/sizeCheckUnmarshalizer.go b/marshal/sizeCheckUnmarshalizer.go new file mode 100644 index 00000000000..3a8d28c37a8 --- /dev/null +++ b/marshal/sizeCheckUnmarshalizer.go @@ -0,0 +1,46 @@ +package marshal + +type sizeCheckUnmarshalizer struct { + Marshalizer + acceptedDelta uint32 +} + +// NewSizeCheckUnmarshalizer creates a wrapper around an existing marshalizer m +// which, during unmarshaling, also checks that the provided buffer does not contain +// additional unused data. +func NewSizeCheckUnmarshalizer(m Marshalizer, maxDelta uint32) Marshalizer { + scu := &sizeCheckUnmarshalizer{ + Marshalizer: m, + acceptedDelta: maxDelta, + } + return scu +} + +// Unmarshal tries to unmarshal input buffer values into output object, and checks +// for additional unused data +func (scu *sizeCheckUnmarshalizer) Unmarshal(obj interface{}, buff []byte) error { + err := scu.Marshalizer.Unmarshal(obj, buff) + if err != nil { + return err + } + + out, erro := scu.Marshal(obj) + if erro != nil { + return erro + } + + maxSize := len(out) + len(out)*int(scu.acceptedDelta)/100 + if len(buff) > maxSize { + return ErrUnmarshallingBadSize + } + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface or +// target marshalizer +func (scu *sizeCheckUnmarshalizer) IsInterfaceNil() bool { + if scu != nil { + return scu.Marshalizer == nil || scu.Marshalizer.IsInterfaceNil() + } + return true +} diff --git a/marshal/sizeCheckUnmarshalizer_test.go b/marshal/sizeCheckUnmarshalizer_test.go new file mode 100644 index 00000000000..f9a8242f895 --- /dev/null +++ b/marshal/sizeCheckUnmarshalizer_test.go @@ -0,0 +1,121 @@ +package marshal + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type testStruct struct { + IntField int `json:"int_field"` + BoolField bool `json:"bool_field"` + StringField string `json:"string_field"` + BytesField []byte `json:"bytes_field"` + OptStringField string `json:"opt_string_field,omitempty"` + OptBytesField []byte `json:"opt_bytes_field,omitempty"` +} + +const ( + goodFull = `{ + "int_field":10, + "bool_field":true, + "string_field":"some string", + "bytes_field":"AQIDBAUG", + "opt_string_field":"some optional string", + "opt_bytes_field":"AQIDBAUGBwgJ" + }` + good = `{ + "int_field":10, + "bool_field":true, + "string_field":"some string", + "bytes_field":"AQIDBAUG" + }` + + withExtra = `{ + "int_field":10, + "bool_field":true, + "string_field":"some string", + "bytes_field":"AQIDBAUG", + "extra_string_field":"some optional string", + "extra_string_field1":"some optional string", + "extra_bytes_field":"AQIDBAUGBwgJ", + "extra_bytes_field1":"AQIDBAUGBwgJ" + }` + + 
badSyntax = `{ + "bool_field":true, + "string_field":"some string", + "bytes_field":"AQIDBAUG", + }` +) + +func TestSizeUnmarshlizer(t *testing.T) { + jm := &JsonMarshalizer{} + m := NewSizeCheckUnmarshalizer(jm, 20) + + ts := &testStruct{} + + err := m.Unmarshal(ts, []byte(goodFull)) + assert.Nil(t, err) + + err = m.Unmarshal(ts, []byte(good)) + assert.Nil(t, err) + + err = m.Unmarshal(ts, []byte(badSyntax)) + assert.NotNil(t, err) + + err = m.Unmarshal(ts, []byte(withExtra)) + assert.NotNil(t, err) +} + +func TestSizeUnmarshlizer_BadConstruction(t *testing.T) { + var jm *JsonMarshalizer + m := NewSizeCheckUnmarshalizer(jm, 20) + var scu *sizeCheckUnmarshalizer + m2 := scu + + assert.True(t, m.IsInterfaceNil()) + assert.True(t, m2.IsInterfaceNil()) +} + +func TestSizeUnmarshlizer_MU(t *testing.T) { + jm := &JsonMarshalizer{} + m := NewSizeCheckUnmarshalizer(jm, 20) + + o := testStruct{ + IntField: 1, + BoolField: true, + StringField: "test", + BytesField: []byte{1, 2, 3, 4, 5, 6}, + OptStringField: "opt str 1", + OptBytesField: nil, + } + o2 := testStruct{} + assert.NotEqual(t, o, o2) + bytes, err := m.Marshal(&o) + assert.Nil(t, err) + err = m.Unmarshal(&o2, bytes) + assert.Nil(t, err) + assert.Equal(t, o, o2) +} + +func BenchmarkSizeCheck_Disabled(b *testing.B) { + m := &JsonMarshalizer{} + benchInput := [][]byte{[]byte(goodFull), []byte(good), []byte(withExtra), []byte(badSyntax)} + ts := &testStruct{} + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _ = m.Unmarshal(ts, benchInput[i%len(benchInput)]) + } +} + +func BenchmarkSizeCheck_Enabled(b *testing.B) { + jm := &JsonMarshalizer{} + m := NewSizeCheckUnmarshalizer(jm, 20) + benchInput := [][]byte{[]byte(goodFull), []byte(good), []byte(withExtra), []byte(badSyntax)} + ts := &testStruct{} + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _ = m.Unmarshal(ts, benchInput[i%len(benchInput)]) + } +} diff --git a/node/errors.go b/node/errors.go index 45072a1d0e8..0a7ce5ad99e 100644 --- a/node/errors.go +++ b/node/errors.go @@ -88,6 +88,9 @@ var ErrNilInterceptorsContainer = errors.New("nil interceptors container") // ErrNilResolversFinder signals that a nil resolvers finder has been provided var ErrNilResolversFinder = errors.New("nil resolvers finder") +// ErrNilEpochStartTrigger signals that a nil start of epoch trigger has been provided +var ErrNilEpochStartTrigger = errors.New("nil start of epoch trigger") + // ErrNilBlockHeader is raised when a valid block header is expected but nil was used var ErrNilBlockHeader = errors.New("block header is nil") @@ -147,3 +150,6 @@ var ErrCannotConvertToPeerAccount = errors.New("cannot convert to peer account") // ErrInvalidChainID signals that an invalid chain ID has been provided var ErrInvalidChainID = errors.New("invalid chain ID in Node") + +// ErrNilBlockTracker signals that a nil block tracker has been provided +var ErrNilBlockTracker = errors.New("trying to set nil block tracker") diff --git a/node/mock/accountsStub.go b/node/mock/accountsStub.go index 59eaaccd8a7..63ff73fce00 100644 --- a/node/mock/accountsStub.go +++ b/node/mock/accountsStub.go @@ -19,6 +19,11 @@ type AccountsStub struct { SaveDataTrieCalled func(acountWrapper state.AccountHandler) error RootHashCalled func() ([]byte, error) RecreateTrieCalled func(rootHash []byte) error + PruneTrieCalled func(rootHash []byte) error + SnapshotStateCalled func(rootHash []byte) + SetStateCheckpointCalled func(rootHash []byte) + CancelPruneCalled func(rootHash []byte) + IsPruningEnabledCalled func() bool } func (aam *AccountsStub) 
AddJournalEntry(je state.JournalEntry) { @@ -77,10 +82,27 @@ func (aam *AccountsStub) RecreateTrie(rootHash []byte) error { return aam.RecreateTrieCalled(rootHash) } +func (aam *AccountsStub) PruneTrie(rootHash []byte) error { + return aam.PruneTrieCalled(rootHash) +} + +func (aam *AccountsStub) CancelPrune(rootHash []byte) { + aam.CancelPruneCalled(rootHash) +} + +func (aam *AccountsStub) SnapshotState(rootHash []byte) { + aam.SnapshotStateCalled(rootHash) +} + +func (aam *AccountsStub) SetStateCheckpoint(rootHash []byte) { + aam.SetStateCheckpointCalled(rootHash) +} + +func (aam *AccountsStub) IsPruningEnabled() bool { + return aam.IsPruningEnabledCalled() +} + // IsInterfaceNil returns true if there is no value under the interface func (aam *AccountsStub) IsInterfaceNil() bool { - if aam == nil { - return true - } - return false + return aam == nil } diff --git a/node/mock/blockProcessorStub.go b/node/mock/blockProcessorStub.go index 4e04cf38931..73a1bc92e93 100644 --- a/node/mock/blockProcessorStub.go +++ b/node/mock/blockProcessorStub.go @@ -5,6 +5,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" ) // BlockProcessorStub mocks the implementation for a blockProcessor @@ -16,7 +17,7 @@ type BlockProcessorStub struct { CreateBlockBodyCalled func(initialHdrData data.HeaderHandler, haveTime func() bool) (data.BodyHandler, error) RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error SetOnRequestTransactionCalled func(f func(destShardID uint32, txHash []byte)) - ApplyBodyToHeaderCalled func(header data.HeaderHandler, body data.BodyHandler) error + ApplyBodyToHeaderCalled func(header data.HeaderHandler, body data.BodyHandler) (data.BodyHandler, error) MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) DecodeBlockBodyCalled func(dta []byte) data.BodyHandler DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler @@ -56,12 +57,12 @@ func (blProcMock *BlockProcessorStub) CreateGenesisBlock(balances map[string]*bi // RevertStateToBlock recreates thee state tries to the root hashes indicated by the provided header func (blProcMock *BlockProcessorStub) RevertStateToBlock(header data.HeaderHandler) error { if blProcMock.RevertStateToBlockCalled != nil { - return blProcMock.RevertStateToBlock(header) + return blProcMock.RevertStateToBlockCalled(header) } return nil } -// CreateTxBlockBody mocks the creation of a transaction block body +// CreateBlockBody mocks the creation of a transaction block body func (blProcMock *BlockProcessorStub) CreateBlockBody(initialHdrData data.HeaderHandler, haveTime func() bool) (data.BodyHandler, error) { return blProcMock.CreateBlockBodyCalled(initialHdrData, haveTime) } @@ -70,7 +71,7 @@ func (blProcMock *BlockProcessorStub) RestoreBlockIntoPools(header data.HeaderHa return blProcMock.RestoreBlockIntoPoolsCalled(header, body) } -func (blProcMock BlockProcessorStub) ApplyBodyToHeader(header data.HeaderHandler, body data.BodyHandler) error { +func (blProcMock BlockProcessorStub) ApplyBodyToHeader(header data.HeaderHandler, body data.BodyHandler) (data.BodyHandler, error) { return blProcMock.ApplyBodyToHeaderCalled(header, body) } @@ -99,7 +100,7 @@ func (blProcMock BlockProcessorStub) CreateNewHeader() data.HeaderHandler { return blProcMock.CreateNewHeaderCalled() } -func (bpm *BlockProcessorStub) ApplyProcessedMiniBlocks(miniBlocks map[string]map[string]struct{}) { 
+func (blProcMock *BlockProcessorStub) ApplyProcessedMiniBlocks(miniBlocks *processedMb.ProcessedMiniBlockTracker) { } diff --git a/node/mock/blockTrackerStub.go b/node/mock/blockTrackerStub.go new file mode 100644 index 00000000000..4003fca8807 --- /dev/null +++ b/node/mock/blockTrackerStub.go @@ -0,0 +1,160 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" +) + +type BlockTrackerStub struct { + AddTrackedHeaderCalled func(header data.HeaderHandler, hash []byte) + AddCrossNotarizedHeaderCalled func(shardID uint32, crossNotarizedHeader data.HeaderHandler, crossNotarizedHeaderHash []byte) + AddSelfNotarizedHeaderCalled func(shardID uint32, selfNotarizedHeader data.HeaderHandler, selfNotarizedHeaderHash []byte) + CleanupHeadersBehindNonceCalled func(shardID uint32, selfNotarizedNonce uint64, crossNotarizedNonce uint64) + ComputeLongestChainCalled func(shardID uint32, header data.HeaderHandler) ([]data.HeaderHandler, [][]byte) + ComputeLongestMetaChainFromLastNotarizedCalled func() ([]data.HeaderHandler, [][]byte, error) + ComputeLongestShardsChainsFromLastNotarizedCalled func() ([]data.HeaderHandler, [][]byte, map[uint32][]data.HeaderHandler, error) + DisplayTrackedHeadersCalled func() + GetCrossNotarizedHeaderCalled func(shardID uint32, offset uint64) (data.HeaderHandler, []byte, error) + GetLastCrossNotarizedHeaderCalled func(shardID uint32) (data.HeaderHandler, []byte, error) + GetLastCrossNotarizedHeadersForAllShardsCalled func() (map[uint32]data.HeaderHandler, error) + GetTrackedHeadersCalled func(shardID uint32) ([]data.HeaderHandler, [][]byte) + GetTrackedHeadersForAllShardsCalled func() map[uint32][]data.HeaderHandler + GetTrackedHeadersWithNonceCalled func(shardID uint32, nonce uint64) ([]data.HeaderHandler, [][]byte) + IsShardStuckCalled func(shardId uint32) bool + RegisterCrossNotarizedHeadersHandlerCalled func(handler func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte)) + RegisterSelfNotarizedHeadersHandlerCalled func(handler func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte)) + RemoveLastNotarizedHeadersCalled func() + RestoreToGenesisCalled func() +} + +func (bts *BlockTrackerStub) AddTrackedHeader(header data.HeaderHandler, hash []byte) { + if bts.AddTrackedHeaderCalled != nil { + bts.AddTrackedHeaderCalled(header, hash) + } +} + +func (bts *BlockTrackerStub) AddCrossNotarizedHeader(shardID uint32, crossNotarizedHeader data.HeaderHandler, crossNotarizedHeaderHash []byte) { + if bts.AddCrossNotarizedHeaderCalled != nil { + bts.AddCrossNotarizedHeaderCalled(shardID, crossNotarizedHeader, crossNotarizedHeaderHash) + } +} + +func (bts *BlockTrackerStub) AddSelfNotarizedHeader(shardID uint32, selfNotarizedHeader data.HeaderHandler, selfNotarizedHeaderHash []byte) { + if bts.AddSelfNotarizedHeaderCalled != nil { + bts.AddSelfNotarizedHeaderCalled(shardID, selfNotarizedHeader, selfNotarizedHeaderHash) + } +} + +func (bts *BlockTrackerStub) CleanupHeadersBehindNonce(shardID uint32, selfNotarizedNonce uint64, crossNotarizedNonce uint64) { + if bts.CleanupHeadersBehindNonceCalled != nil { + bts.CleanupHeadersBehindNonceCalled(shardID, selfNotarizedNonce, crossNotarizedNonce) + } +} + +func (bts *BlockTrackerStub) ComputeLongestChain(shardID uint32, header data.HeaderHandler) ([]data.HeaderHandler, [][]byte) { + if bts.ComputeLongestChainCalled != nil { + return bts.ComputeLongestChainCalled(shardID, header) + } + return nil, nil +} + +func (bts *BlockTrackerStub) ComputeLongestMetaChainFromLastNotarized() 
([]data.HeaderHandler, [][]byte, error) { + if bts.ComputeLongestMetaChainFromLastNotarizedCalled != nil { + return bts.ComputeLongestMetaChainFromLastNotarizedCalled() + } + + return nil, nil, nil +} + +func (bts *BlockTrackerStub) ComputeLongestShardsChainsFromLastNotarized() ([]data.HeaderHandler, [][]byte, map[uint32][]data.HeaderHandler, error) { + if bts.ComputeLongestShardsChainsFromLastNotarizedCalled != nil { + return bts.ComputeLongestShardsChainsFromLastNotarizedCalled() + } + + return nil, nil, nil, nil +} + +func (bts *BlockTrackerStub) DisplayTrackedHeaders() { + if bts.DisplayTrackedHeadersCalled != nil { + bts.DisplayTrackedHeadersCalled() + } +} + +func (bts *BlockTrackerStub) GetCrossNotarizedHeader(shardID uint32, offset uint64) (data.HeaderHandler, []byte, error) { + if bts.GetCrossNotarizedHeaderCalled != nil { + return bts.GetCrossNotarizedHeaderCalled(shardID, offset) + } + + return nil, nil, nil +} + +func (bts *BlockTrackerStub) GetLastCrossNotarizedHeader(shardID uint32) (data.HeaderHandler, []byte, error) { + if bts.GetLastCrossNotarizedHeaderCalled != nil { + return bts.GetLastCrossNotarizedHeaderCalled(shardID) + } + + return nil, nil, nil +} + +func (bts *BlockTrackerStub) GetLastCrossNotarizedHeadersForAllShards() (map[uint32]data.HeaderHandler, error) { + if bts.GetLastCrossNotarizedHeadersForAllShardsCalled != nil { + return bts.GetLastCrossNotarizedHeadersForAllShardsCalled() + } + + return nil, nil +} + +func (bts *BlockTrackerStub) GetTrackedHeaders(shardID uint32) ([]data.HeaderHandler, [][]byte) { + if bts.GetTrackedHeadersCalled != nil { + return bts.GetTrackedHeadersCalled(shardID) + } + + return nil, nil +} + +func (bts *BlockTrackerStub) GetTrackedHeadersForAllShards() map[uint32][]data.HeaderHandler { + if bts.GetTrackedHeadersForAllShardsCalled != nil { + return bts.GetTrackedHeadersForAllShardsCalled() + } + + return nil +} + +func (bts *BlockTrackerStub) GetTrackedHeadersWithNonce(shardID uint32, nonce uint64) ([]data.HeaderHandler, [][]byte) { + if bts.GetTrackedHeadersWithNonceCalled != nil { + return bts.GetTrackedHeadersWithNonceCalled(shardID, nonce) + } + + return nil, nil +} + +func (bts *BlockTrackerStub) IsShardStuck(shardId uint32) bool { + return bts.IsShardStuckCalled(shardId) +} + +func (bts *BlockTrackerStub) RegisterCrossNotarizedHeadersHandler(handler func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte)) { + if bts.RegisterCrossNotarizedHeadersHandlerCalled != nil { + bts.RegisterCrossNotarizedHeadersHandlerCalled(handler) + } +} + +func (bts *BlockTrackerStub) RegisterSelfNotarizedHeadersHandler(handler func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte)) { + if bts.RegisterSelfNotarizedHeadersHandlerCalled != nil { + bts.RegisterSelfNotarizedHeadersHandlerCalled(handler) + } +} + +func (bts *BlockTrackerStub) RemoveLastNotarizedHeaders() { + if bts.RemoveLastNotarizedHeadersCalled != nil { + bts.RemoveLastNotarizedHeadersCalled() + } +} + +func (bts *BlockTrackerStub) RestoreToGenesis() { + if bts.RestoreToGenesisCalled != nil { + bts.RestoreToGenesisCalled() + } +} + +func (bts *BlockTrackerStub) IsInterfaceNil() bool { + return bts == nil +} diff --git a/node/mock/forkDetectorMock.go b/node/mock/forkDetectorMock.go index dac5e18f8f7..626309b2a71 100644 --- a/node/mock/forkDetectorMock.go +++ b/node/mock/forkDetectorMock.go @@ -7,47 +7,55 @@ import ( // ForkDetectorMock is a mock implementation for the ForkDetector interface type ForkDetectorMock struct { - AddHeaderCalled 
func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error - RemoveHeadersCalled func(nonce uint64, hash []byte) + AddHeaderCalled func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error + RemoveHeaderCalled func(nonce uint64, hash []byte) CheckForkCalled func() *process.ForkInfo GetHighestFinalBlockNonceCalled func() uint64 + GetHighestFinalBlockHashCalled func() []byte ProbableHighestNonceCalled func() uint64 - ResetProbableHighestNonceCalled func() ResetForkCalled func() GetNotarizedHeaderHashCalled func(nonce uint64) []byte + SetRollBackNonceCalled func(nonce uint64) + RestoreToGenesisCalled func() } -func (f *ForkDetectorMock) RestoreFinalCheckPointToGenesis() { - +func (fdm *ForkDetectorMock) RestoreToGenesis() { + fdm.RestoreToGenesisCalled() } // AddHeader is a mock implementation for AddHeader -func (f *ForkDetectorMock) AddHeader(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error { - return f.AddHeaderCalled(header, hash, state, finalHeaders, finalHeadersHashes, isNotarizedShardStuck) +func (fdm *ForkDetectorMock) AddHeader(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { + return fdm.AddHeaderCalled(header, hash, state, selfNotarizedHeaders, selfNotarizedHeadersHashes) } -// RemoveHeaders is a mock implementation for RemoveHeaders -func (f *ForkDetectorMock) RemoveHeaders(nonce uint64, hash []byte) { - f.RemoveHeadersCalled(nonce, hash) +// RemoveHeader is a mock implementation for RemoveHeader +func (fdm *ForkDetectorMock) RemoveHeader(nonce uint64, hash []byte) { + fdm.RemoveHeaderCalled(nonce, hash) } // CheckFork is a mock implementation for CheckFork -func (f *ForkDetectorMock) CheckFork() *process.ForkInfo { - return f.CheckForkCalled() +func (fdm *ForkDetectorMock) CheckFork() *process.ForkInfo { + return fdm.CheckForkCalled() } // GetHighestFinalBlockNonce is a mock implementation for GetHighestFinalBlockNonce -func (f *ForkDetectorMock) GetHighestFinalBlockNonce() uint64 { - return f.GetHighestFinalBlockNonceCalled() +func (fdm *ForkDetectorMock) GetHighestFinalBlockNonce() uint64 { + return fdm.GetHighestFinalBlockNonceCalled() +} + +func (fdm *ForkDetectorMock) GetHighestFinalBlockHash() []byte { + return fdm.GetHighestFinalBlockHashCalled() } // ProbableHighestNonce is a mock implementation for GetProbableHighestNonce -func (f *ForkDetectorMock) ProbableHighestNonce() uint64 { - return f.ProbableHighestNonceCalled() +func (fdm *ForkDetectorMock) ProbableHighestNonce() uint64 { + return fdm.ProbableHighestNonceCalled() } -func (fdm *ForkDetectorMock) ResetProbableHighestNonce() { - fdm.ResetProbableHighestNonceCalled() +func (fdm *ForkDetectorMock) SetRollBackNonce(nonce uint64) { + if fdm.SetRollBackNonceCalled != nil { + fdm.SetRollBackNonceCalled(nonce) + } } func (fdm *ForkDetectorMock) ResetFork() { @@ -60,8 +68,5 @@ func (fdm *ForkDetectorMock) GetNotarizedHeaderHash(nonce uint64) []byte { // IsInterfaceNil returns true if there is no value under the interface func (fdm *ForkDetectorMock) IsInterfaceNil() bool { - if fdm == nil { - return true - } - return false + return fdm == nil } diff --git 
a/node/mock/hasherFake.go b/node/mock/hasherFake.go index 447de13d67e..3a823d4d1d8 100644 --- a/node/mock/hasherFake.go +++ b/node/mock/hasherFake.go @@ -11,7 +11,7 @@ type HasherFake struct { // Compute will output the SHA's equivalent of the input string func (sha HasherFake) Compute(s string) []byte { h := sha256.New() - h.Write([]byte(s)) + _, _ = h.Write([]byte(s)) return h.Sum(nil) } diff --git a/node/mock/hasherMock.go b/node/mock/hasherMock.go index 35dde20398a..6fab4527e39 100644 --- a/node/mock/hasherMock.go +++ b/node/mock/hasherMock.go @@ -22,9 +22,6 @@ func (HasherMock) Size() int { } // IsInterfaceNil returns true if there is no value under the interface -func (hash *HasherMock) IsInterfaceNil() bool { - if hash == nil { - return true - } +func (hash HasherMock) IsInterfaceNil() bool { return false } diff --git a/node/mock/headersCacherStub.go b/node/mock/headersCacherStub.go new file mode 100644 index 00000000000..aec5bcf8c0a --- /dev/null +++ b/node/mock/headersCacherStub.go @@ -0,0 +1,83 @@ +package mock + +import ( + "errors" + + "github.com/ElrondNetwork/elrond-go/data" +) + +type HeadersCacherStub struct { + AddCalled func(headerHash []byte, header data.HeaderHandler) + RemoveHeaderByHashCalled func(headerHash []byte) + RemoveHeaderByNonceAndShardIdCalled func(hdrNonce uint64, shardId uint32) + GetHeaderByNonceAndShardIdCalled func(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) + GetHeaderByHashCalled func(hash []byte) (data.HeaderHandler, error) + ClearCalled func() + RegisterHandlerCalled func(handler func(header data.HeaderHandler, shardHeaderHash []byte)) + NoncesCalled func(shardId uint32) []uint64 + LenCalled func() int + MaxSizeCalled func() int +} + +func (hcs *HeadersCacherStub) AddHeader(headerHash []byte, header data.HeaderHandler) { + if hcs.AddCalled != nil { + hcs.AddCalled(headerHash, header) + } +} + +func (hcs *HeadersCacherStub) RemoveHeaderByHash(headerHash []byte) { + if hcs.RemoveHeaderByHashCalled != nil { + hcs.RemoveHeaderByHashCalled(headerHash) + } +} + +func (hcs *HeadersCacherStub) RemoveHeaderByNonceAndShardId(hdrNonce uint64, shardId uint32) { + if hcs.RemoveHeaderByNonceAndShardIdCalled != nil { + hcs.RemoveHeaderByNonceAndShardIdCalled(hdrNonce, shardId) + } +} + +func (hcs *HeadersCacherStub) GetHeadersByNonceAndShardId(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) { + if hcs.GetHeaderByNonceAndShardIdCalled != nil { + return hcs.GetHeaderByNonceAndShardIdCalled(hdrNonce, shardId) + } + return nil, nil, errors.New("err") +} + +func (hcs *HeadersCacherStub) GetHeaderByHash(hash []byte) (data.HeaderHandler, error) { + if hcs.GetHeaderByHashCalled != nil { + return hcs.GetHeaderByHashCalled(hash) + } + return nil, nil +} + +func (hcs *HeadersCacherStub) Clear() { + if hcs.ClearCalled != nil { + hcs.ClearCalled() + } +} + +func (hcs *HeadersCacherStub) RegisterHandler(handler func(header data.HeaderHandler, shardHeaderHash []byte)) { + if hcs.RegisterHandlerCalled != nil { + hcs.RegisterHandlerCalled(handler) + } +} + +func (hcs *HeadersCacherStub) Nonces(shardId uint32) []uint64 { + if hcs.NoncesCalled != nil { + return hcs.NoncesCalled(shardId) + } + return nil +} + +func (hcs *HeadersCacherStub) Len() int { + return 0 +} + +func (hcs *HeadersCacherStub) MaxSize() int { + return 100 +} + +func (hcs *HeadersCacherStub) IsInterfaceNil() bool { + return hcs == nil +} diff --git a/node/mock/metaPoolsHolderStub.go b/node/mock/metaPoolsHolderStub.go index 41d87fe259f..41de658af9e 100644 --- 
a/node/mock/metaPoolsHolderStub.go +++ b/node/mock/metaPoolsHolderStub.go @@ -6,10 +6,9 @@ import ( ) type MetaPoolsHolderStub struct { - MetaBlocksCalled func() storage.Cacher MiniBlocksCalled func() storage.Cacher - ShardHeadersCalled func() storage.Cacher - HeadersNoncesCalled func() dataRetriever.Uint64SyncMapCacher + HeadersCalled func() dataRetriever.HeadersPool + TrieNodesCalled func() storage.Cacher TransactionsCalled func() dataRetriever.ShardedDataCacherNotifier UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier CurrBlockTxsCalled func() dataRetriever.TransactionCacher @@ -27,20 +26,16 @@ func (mphs *MetaPoolsHolderStub) UnsignedTransactions() dataRetriever.ShardedDat return mphs.UnsignedTransactionsCalled() } -func (mphs *MetaPoolsHolderStub) MetaBlocks() storage.Cacher { - return mphs.MetaBlocksCalled() -} - func (mphs *MetaPoolsHolderStub) MiniBlocks() storage.Cacher { return mphs.MiniBlocksCalled() } -func (mphs *MetaPoolsHolderStub) ShardHeaders() storage.Cacher { - return mphs.ShardHeadersCalled() +func (mphs *MetaPoolsHolderStub) Headers() dataRetriever.HeadersPool { + return mphs.HeadersCalled() } -func (mphs *MetaPoolsHolderStub) HeadersNonces() dataRetriever.Uint64SyncMapCacher { - return mphs.HeadersNoncesCalled() +func (mphs *MetaPoolsHolderStub) TrieNodes() storage.Cacher { + return mphs.TrieNodesCalled() } // IsInterfaceNil returns true if there is no value under the interface diff --git a/node/mock/poolsHolderStub.go b/node/mock/poolsHolderStub.go index 35a1d5e92db..5318607eb2d 100644 --- a/node/mock/poolsHolderStub.go +++ b/node/mock/poolsHolderStub.go @@ -6,29 +6,24 @@ import ( ) type PoolsHolderStub struct { - HeadersCalled func() storage.Cacher - HeadersNoncesCalled func() dataRetriever.Uint64SyncMapCacher + HeadersCalled func() dataRetriever.HeadersPool PeerChangesBlocksCalled func() storage.Cacher TransactionsCalled func() dataRetriever.ShardedDataCacherNotifier UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier RewardTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier MiniBlocksCalled func() storage.Cacher - MetaBlocksCalled func() storage.Cacher CurrBlockTxsCalled func() dataRetriever.TransactionCacher + TrieNodesCalled func() storage.Cacher } func (phs *PoolsHolderStub) CurrentBlockTxs() dataRetriever.TransactionCacher { return phs.CurrBlockTxsCalled() } -func (phs *PoolsHolderStub) Headers() storage.Cacher { +func (phs *PoolsHolderStub) Headers() dataRetriever.HeadersPool { return phs.HeadersCalled() } -func (phs *PoolsHolderStub) HeadersNonces() dataRetriever.Uint64SyncMapCacher { - return phs.HeadersNoncesCalled() -} - func (phs *PoolsHolderStub) PeerChangesBlocks() storage.Cacher { return phs.PeerChangesBlocksCalled() } @@ -41,10 +36,6 @@ func (phs *PoolsHolderStub) MiniBlocks() storage.Cacher { return phs.MiniBlocksCalled() } -func (phs *PoolsHolderStub) MetaBlocks() storage.Cacher { - return phs.MetaBlocksCalled() -} - func (phs *PoolsHolderStub) UnsignedTransactions() dataRetriever.ShardedDataCacherNotifier { return phs.UnsignedTransactionsCalled() } @@ -53,6 +44,10 @@ func (phs *PoolsHolderStub) RewardTransactions() dataRetriever.ShardedDataCacher return phs.RewardTransactionsCalled() } +func (phs *PoolsHolderStub) TrieNodes() storage.Cacher { + return phs.TrieNodesCalled() +} + // IsInterfaceNil returns true if there is no value under the interface func (phs *PoolsHolderStub) IsInterfaceNil() bool { if phs == nil { diff --git a/node/mock/shardedDataStub.go b/node/mock/shardedDataStub.go 
index 688a94904dd..3fa0868838e 100644 --- a/node/mock/shardedDataStub.go +++ b/node/mock/shardedDataStub.go @@ -47,10 +47,6 @@ func (sd *ShardedDataStub) MergeShardStores(sourceCacheId, destCacheId string) { sd.MergeShardStoresCalled(sourceCacheId, destCacheId) } -func (sd *ShardedDataStub) MoveData(sourceCacheId, destCacheId string, key [][]byte) { - sd.MoveDataCalled(sourceCacheId, destCacheId, key) -} - func (sd *ShardedDataStub) Clear() { sd.ClearCalled() } diff --git a/node/node.go b/node/node.go index 52f9c377d04..c2b29aad06a 100644 --- a/node/node.go +++ b/node/node.go @@ -25,6 +25,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/epochStart" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/marshal" @@ -58,6 +59,7 @@ type Option func(*Node) error // required services as requested type Node struct { marshalizer marshal.Marshalizer + sizeCheckDelta uint32 ctx context.Context hasher hashing.Hasher feeHandler process.FeeHandler @@ -70,6 +72,7 @@ type Node struct { rounder consensus.Rounder blockProcessor process.BlockProcessor genesisTime time.Time + epochStartTrigger epochStart.TriggerHandler accounts state.AccountsAdapter addrConverter state.AddressConverter uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter @@ -113,6 +116,7 @@ type Node struct { headerSigVerifier spos.RandSeedVerifier chainID []byte + blockTracker process.BlockTracker } // ApplyOptions can set up different configurable options of a Node instance @@ -269,6 +273,10 @@ func (n *Node) StartConsensus() error { return err } + netInputMarshalizer := n.marshalizer + if n.sizeCheckDelta > 0 { + netInputMarshalizer = marshal.NewSizeCheckUnmarshalizer(n.marshalizer, n.sizeCheckDelta) + } worker, err := spos.NewWorker( consensusService, n.blkc, @@ -278,7 +286,7 @@ func (n *Node) StartConsensus() error { consensusState, n.forkDetector, n.keyGen, - n.marshalizer, + netInputMarshalizer, n.rounder, n.shardCoordinator, n.singleSigner, @@ -370,7 +378,8 @@ func (n *Node) createChronologyHandler(rounder consensus.Rounder, appStatusHandl chr, err := chronology.NewChronology( n.genesisTime, rounder, - n.syncTimer) + n.syncTimer, + ) if err != nil { return nil, err @@ -398,6 +407,11 @@ func (n *Node) createBootstrapper(rounder consensus.Rounder) (process.Bootstrapp } func (n *Node) createShardBootstrapper(rounder consensus.Rounder) (process.Bootstrapper, error) { + accountsWrapper, err := state.NewAccountsDbWrapperSync(n.accounts) + if err != nil { + return nil, err + } + storageBootstrapArguments := storageBootstrap.ArgsStorageBootstrapper{ ResolversFinder: n.resolversFinder, BootStorer: n.bootStorer, @@ -409,6 +423,7 @@ func (n *Node) createShardBootstrapper(rounder consensus.Rounder) (process.Boots Uint64Converter: n.uint64ByteSliceConverter, BootstrapRoundIndex: n.bootstrapRoundIndex, ShardCoordinator: n.shardCoordinator, + BlockTracker: n.blockTracker, } shardStorageBootstrapper, err := storageBootstrap.NewShardStorageBootstrapper(storageBootstrapArguments) @@ -428,7 +443,7 @@ func (n *Node) createShardBootstrapper(rounder consensus.Rounder) (process.Boots n.forkDetector, n.resolversFinder, n.shardCoordinator, - n.accounts, + accountsWrapper, n.blackListHandler, n.messenger, n.bootStorer, @@ -454,6 +469,7 @@ func (n *Node) createMetaChainBootstrapper(rounder consensus.Rounder) 
(process.B Uint64Converter: n.uint64ByteSliceConverter, BootstrapRoundIndex: n.bootstrapRoundIndex, ShardCoordinator: n.shardCoordinator, + BlockTracker: n.blockTracker, } metaStorageBootstrapper, err := storageBootstrap.NewMetaStorageBootstrapper(storageBootstrapArguments) @@ -479,6 +495,7 @@ func (n *Node) createMetaChainBootstrapper(rounder consensus.Rounder) (process.B n.bootStorer, metaStorageBootstrapper, n.requestedItemsHandler, + n.epochStartTrigger, ) if err != nil { @@ -548,7 +565,7 @@ func (n *Node) SendTransaction( value string, gasPrice uint64, gasLimit uint64, - transactionData string, + transactionData []byte, signature []byte) (string, error) { if n.shardCoordinator == nil || n.shardCoordinator.IsInterfaceNil() { @@ -727,9 +744,8 @@ func (n *Node) CreateTransaction( senderHex string, gasPrice uint64, gasLimit uint64, - data string, + data []byte, signatureHex string, - challenge string, ) (*transaction.Transaction, error) { if n.addrConverter == nil || n.addrConverter.IsInterfaceNil() { @@ -755,11 +771,6 @@ func (n *Node) CreateTransaction( return nil, errors.New("could not fetch signature bytes") } - challengeBytes, err := hex.DecodeString(challenge) - if err != nil { - return nil, errors.New("could not fetch challenge bytes") - } - valAsBigInt, ok := big.NewInt(0).SetString(value, 10) if !ok { return nil, ErrInvalidValue @@ -772,9 +783,8 @@ func (n *Node) CreateTransaction( SndAddr: senderAddress.Bytes(), GasPrice: gasPrice, GasLimit: gasLimit, - Data: data, + Data: []byte(data), Signature: signatureBytes, - Challenge: challengeBytes, }, nil } @@ -864,6 +874,7 @@ func (n *Node) StartHeartbeat(hbConfig config.HeartbeatConfig, versionNumber str } heartbeatStorageUnit := n.store.GetStorer(dataRetriever.HeartbeatUnit) + heartBeatMsgProcessor, err := heartbeat.NewMessageProcessor( n.singleSigner, n.keyGen, @@ -874,8 +885,12 @@ func (n *Node) StartHeartbeat(hbConfig config.HeartbeatConfig, versionNumber str heartbeatStorer, err := storage.NewHeartbeatDbStorer(heartbeatStorageUnit, n.marshalizer) timer := &heartbeat.RealTimer{} + netInputMarshalizer := n.marshalizer + if n.sizeCheckDelta > 0 { + netInputMarshalizer = marshal.NewSizeCheckUnmarshalizer(n.marshalizer, n.sizeCheckDelta) + } n.heartbeatMonitor, err = heartbeat.NewMonitor( - n.marshalizer, + netInputMarshalizer, time.Second*time.Duration(hbConfig.DurationInSecToConsiderUnresponsive), n.initialNodesPubkeys, n.genesisTime, diff --git a/node/nodeTesting.go b/node/nodeTesting.go index 8ed2330f736..6c3e8b5e4ed 100644 --- a/node/nodeTesting.go +++ b/node/nodeTesting.go @@ -197,7 +197,7 @@ func (n *Node) generateAndSignSingleTx( GasPrice: minTxGasPrice, RcvAddr: rcvAddrBytes, SndAddr: sndAddrBytes, - Data: data, + Data: []byte(data), } marshalizedTx, err := n.marshalizer.Marshal(&tx) diff --git a/node/nodeTesting_test.go b/node/nodeTesting_test.go index a8693a4a6f4..7979dc7ffd0 100644 --- a/node/nodeTesting_test.go +++ b/node/nodeTesting_test.go @@ -18,6 +18,8 @@ import ( "github.com/stretchr/testify/assert" ) +const testSizeCheckDelta = 100 + var timeoutWait = time.Second //------- GenerateAndSendBulkTransactions @@ -39,7 +41,7 @@ func TestGenerateAndSendBulkTransactions_NilAccountAdapterShouldErr(t *testing.T singleSigner := &mock.SinglesignMock{} n, _ := node.NewNode( - node.WithMarshalizer(marshalizer), + node.WithMarshalizer(marshalizer, testSizeCheckDelta), node.WithHasher(&mock.HasherMock{}), node.WithAddressConverter(addrConverter), node.WithTxSignPrivKey(sk), @@ -61,7 +63,7 @@ func 
TestGenerateAndSendBulkTransactions_NilSingleSignerShouldErr(t *testing.T) accAdapter := getAccAdapter(big.NewInt(0)) n, _ := node.NewNode( - node.WithMarshalizer(marshalizer), + node.WithMarshalizer(marshalizer, testSizeCheckDelta), node.WithAccountsAdapter(accAdapter), node.WithHasher(&mock.HasherMock{}), node.WithAddressConverter(addrConverter), @@ -84,7 +86,7 @@ func TestGenerateAndSendBulkTransactions_NilShardCoordinatorShouldErr(t *testing singleSigner := &mock.SinglesignMock{} n, _ := node.NewNode( - node.WithMarshalizer(marshalizer), + node.WithMarshalizer(marshalizer, testSizeCheckDelta), node.WithAccountsAdapter(accAdapter), node.WithHasher(&mock.HasherMock{}), node.WithAddressConverter(addrConverter), @@ -105,7 +107,7 @@ func TestGenerateAndSendBulkTransactions_NilAddressConverterShouldErr(t *testing singleSigner := &mock.SinglesignMock{} n, _ := node.NewNode( - node.WithMarshalizer(marshalizer), + node.WithMarshalizer(marshalizer, testSizeCheckDelta), node.WithHasher(&mock.HasherMock{}), node.WithAccountsAdapter(accAdapter), node.WithTxSignPrivKey(sk), @@ -136,7 +138,7 @@ func TestGenerateAndSendBulkTransactions_NilPrivateKeyShouldErr(t *testing.T) { node.WithAccountsAdapter(accAdapter), node.WithAddressConverter(addrConverter), node.WithTxSignPubKey(pk), - node.WithMarshalizer(&mock.MarshalizerFake{}), + node.WithMarshalizer(&mock.MarshalizerFake{}, testSizeCheckDelta), node.WithTxSingleSigner(singleSigner), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), node.WithDataPool(dataPool), @@ -232,7 +234,7 @@ func TestGenerateAndSendBulkTransactions_MarshalizerErrorsShouldErr(t *testing.T node.WithAddressConverter(addrConverter), node.WithTxSignPrivKey(sk), node.WithTxSignPubKey(pk), - node.WithMarshalizer(marshalizer), + node.WithMarshalizer(marshalizer, testSizeCheckDelta), node.WithTxSingleSigner(singleSigner), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), node.WithDataPool(dataPool), @@ -305,7 +307,7 @@ func TestGenerateAndSendBulkTransactions_ShouldWork(t *testing.T) { keyGen := &mock.KeyGenMock{} sk, pk := keyGen.GeneratePair() n, _ := node.NewNode( - node.WithMarshalizer(marshalizer), + node.WithMarshalizer(marshalizer, testSizeCheckDelta), node.WithHasher(&mock.HasherMock{}), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), diff --git a/node/node_test.go b/node/node_test.go index 4ab76c6d9ee..47258960534 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -118,7 +118,7 @@ func TestStart_CorrectParams(t *testing.T) { messenger := getMessenger() n, _ := node.NewNode( node.WithMessenger(messenger), - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithAccountsAdapter(&mock.AccountsStub{}), @@ -135,7 +135,7 @@ func TestStart_CorrectParamsApplyingOptions(t *testing.T) { messenger := getMessenger() err := n.ApplyOptions( node.WithMessenger(messenger), - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithAccountsAdapter(&mock.AccountsStub{}), @@ -154,7 +154,7 @@ func TestApplyOptions_NodeStarted(t *testing.T) { messenger := getMessenger() n, _ := node.NewNode( node.WithMessenger(messenger), - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), ) err 
:= n.Start() @@ -167,7 +167,7 @@ func TestApplyOptions_NodeStarted(t *testing.T) { func TestStop_NotStartedYet(t *testing.T) { n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), ) @@ -184,7 +184,7 @@ func TestStop_MessengerCloseErrors(t *testing.T) { } n, _ := node.NewNode( node.WithMessenger(messenger), - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), ) @@ -198,7 +198,7 @@ func TestStop_MessengerCloseErrors(t *testing.T) { func TestStop(t *testing.T) { n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), ) err := n.Start() @@ -212,7 +212,7 @@ func TestStop(t *testing.T) { func TestGetBalance_NoAddrConverterShouldError(t *testing.T) { n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAccountsAdapter(&mock.AccountsStub{}), node.WithTxSignPrivKey(&mock.PrivateKeyStub{}), @@ -225,7 +225,7 @@ func TestGetBalance_NoAddrConverterShouldError(t *testing.T) { func TestGetBalance_NoAccAdapterShouldError(t *testing.T) { n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithTxSignPrivKey(&mock.PrivateKeyStub{}), @@ -250,7 +250,7 @@ func TestGetBalance_CreateAddressFailsShouldError(t *testing.T) { privateKey := getPrivateKey() singleSigner := &mock.SinglesignMock{} n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), @@ -272,7 +272,7 @@ func TestGetBalance_GetAccountFailsShouldError(t *testing.T) { addrConverter := mock.NewAddressConverterFake(32, "0x") privateKey := getPrivateKey() n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), @@ -304,7 +304,7 @@ func TestGetBalance_GetAccountReturnsNil(t *testing.T) { addrConverter := mock.NewAddressConverterFake(32, "0x") privateKey := getPrivateKey() n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), @@ -321,7 +321,7 @@ func TestGetBalance(t *testing.T) { addrConverter := mock.NewAddressConverterFake(32, "0x") privateKey := getPrivateKey() n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), @@ -337,7 +337,7 @@ func TestGetBalance(t *testing.T) { func TestGenerateTransaction_NoAddrConverterShouldError(t *testing.T) { n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAccountsAdapter(&mock.AccountsStub{}), node.WithTxSignPrivKey(&mock.PrivateKeyStub{}), @@ -349,7 
+349,7 @@ func TestGenerateTransaction_NoAddrConverterShouldError(t *testing.T) { func TestGenerateTransaction_NoAccAdapterShouldError(t *testing.T) { n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithTxSignPrivKey(&mock.PrivateKeyStub{}), @@ -361,7 +361,7 @@ func TestGenerateTransaction_NoAccAdapterShouldError(t *testing.T) { func TestGenerateTransaction_NoPrivateKeyShouldError(t *testing.T) { n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithAccountsAdapter(&mock.AccountsStub{}), @@ -376,7 +376,7 @@ func TestGenerateTransaction_CreateAddressFailsShouldError(t *testing.T) { addrConverter := mock.NewAddressConverterFake(32, "0x") privateKey := getPrivateKey() n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), @@ -396,7 +396,7 @@ func TestGenerateTransaction_GetAccountFailsShouldError(t *testing.T) { addrConverter := mock.NewAddressConverterFake(32, "0x") privateKey := getPrivateKey() n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), @@ -419,7 +419,7 @@ func TestGenerateTransaction_GetAccountReturnsNilShouldWork(t *testing.T) { singleSigner := &mock.SinglesignMock{} n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), @@ -438,7 +438,7 @@ func TestGenerateTransaction_GetExistingAccountShouldWork(t *testing.T) { singleSigner := &mock.SinglesignMock{} n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), @@ -461,7 +461,7 @@ func TestGenerateTransaction_MarshalErrorsShouldError(t *testing.T) { }, } n, _ := node.NewNode( - node.WithMarshalizer(marshalizer), + node.WithMarshalizer(marshalizer, testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), @@ -480,7 +480,7 @@ func TestGenerateTransaction_SignTxErrorsShouldError(t *testing.T) { singleSigner := &mock.SinglesignFailMock{} n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), @@ -500,7 +500,7 @@ func TestGenerateTransaction_ShouldSetCorrectSignature(t *testing.T) { singleSigner := &mock.SinglesignMock{} n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), @@ -530,7 +530,7 @@ func TestGenerateTransaction_ShouldSetCorrectNonce(t *testing.T) { 
singleSigner := &mock.SinglesignMock{} n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), @@ -551,7 +551,7 @@ func TestGenerateTransaction_CorrectParamsShouldNotError(t *testing.T) { singleSigner := &mock.SinglesignMock{} n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), @@ -566,7 +566,7 @@ func TestCreateTransaction_NilAddrConverterShouldErr(t *testing.T) { t.Parallel() n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAccountsAdapter(&mock.AccountsStub{}), node.WithTxSignPrivKey(&mock.PrivateKeyStub{}), @@ -578,11 +578,10 @@ func TestCreateTransaction_NilAddrConverterShouldErr(t *testing.T) { sender := "" gasPrice := uint64(10) gasLimit := uint64(20) - txData := "-" + txData := []byte("-") signature := "-" - challenge := "-" - tx, err := n.CreateTransaction(nonce, value.String(), receiver, sender, gasPrice, gasLimit, txData, signature, challenge) + tx, err := n.CreateTransaction(nonce, value.String(), receiver, sender, gasPrice, gasLimit, txData, signature) assert.Nil(t, tx) assert.Equal(t, node.ErrNilAddressConverter, err) @@ -592,7 +591,7 @@ func TestCreateTransaction_NilAccountsAdapterShouldErr(t *testing.T) { t.Parallel() n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(&mock.AddressConverterStub{ CreateAddressFromHexHandler: func(hexAddress string) (container state.AddressContainer, e error) { @@ -608,11 +607,10 @@ func TestCreateTransaction_NilAccountsAdapterShouldErr(t *testing.T) { sender := "" gasPrice := uint64(10) gasLimit := uint64(20) - txData := "-" + txData := []byte("-") signature := "-" - challenge := "-" - tx, err := n.CreateTransaction(nonce, value.String(), receiver, sender, gasPrice, gasLimit, txData, signature, challenge) + tx, err := n.CreateTransaction(nonce, value.String(), receiver, sender, gasPrice, gasLimit, txData, signature) assert.Nil(t, tx) assert.Equal(t, node.ErrNilAccountsAdapter, err) @@ -622,7 +620,7 @@ func TestCreateTransaction_InvalidSignatureShouldErr(t *testing.T) { t.Parallel() n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(&mock.AddressConverterStub{ CreateAddressFromHexHandler: func(hexAddress string) (container state.AddressContainer, e error) { @@ -639,11 +637,10 @@ func TestCreateTransaction_InvalidSignatureShouldErr(t *testing.T) { sender := "snd" gasPrice := uint64(10) gasLimit := uint64(20) - txData := "-" + txData := []byte("-") signature := "-" - challenge := "af4e5" - tx, err := n.CreateTransaction(nonce, value.String(), receiver, sender, gasPrice, gasLimit, txData, signature, challenge) + tx, err := n.CreateTransaction(nonce, value.String(), receiver, sender, gasPrice, gasLimit, txData, signature) assert.Nil(t, tx) assert.NotNil(t, err) @@ -653,7 +650,7 @@ func TestCreateTransaction_OkValsShouldWork(t *testing.T) { t.Parallel() n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + 
node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(&mock.AddressConverterStub{ CreateAddressFromHexHandler: func(hexAddress string) (container state.AddressContainer, e error) { @@ -670,11 +667,10 @@ func TestCreateTransaction_OkValsShouldWork(t *testing.T) { sender := "snd" gasPrice := uint64(10) gasLimit := uint64(20) - txData := "-" + txData := []byte("-") signature := "617eff4f" - challenge := "aff64e" - tx, err := n.CreateTransaction(nonce, value.String(), receiver, sender, gasPrice, gasLimit, txData, signature, challenge) + tx, err := n.CreateTransaction(nonce, value.String(), receiver, sender, gasPrice, gasLimit, txData, signature) assert.NotNil(t, tx) assert.Nil(t, err) @@ -691,7 +687,7 @@ func TestSendBulkTransactions_NoTxShouldErr(t *testing.T) { hasher := &mock.HasherFake{} adrConverter := mock.NewAddressConverterFake(32, "0x") n, _ := node.NewNode( - node.WithMarshalizer(marshalizer), + node.WithMarshalizer(marshalizer, testSizeCheckDelta), node.WithAddressConverter(adrConverter), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), node.WithMessenger(mes), @@ -717,7 +713,7 @@ func TestSendTransaction_ShouldWork(t *testing.T) { adrConverter := mock.NewAddressConverterFake(32, "0x") n, _ := node.NewNode( - node.WithMarshalizer(marshalizer), + node.WithMarshalizer(marshalizer, testSizeCheckDelta), node.WithAddressConverter(adrConverter), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), node.WithMessenger(mes), @@ -741,7 +737,7 @@ func TestSendTransaction_ShouldWork(t *testing.T) { value.String(), 0, 0, - txData, + []byte(txData), signature) marshalizedTx, _ := marshalizer.Marshal(&transaction.Transaction{ @@ -749,7 +745,7 @@ func TestSendTransaction_ShouldWork(t *testing.T) { Value: value, SndAddr: senderBuff.Bytes(), RcvAddr: receiverBuff.Bytes(), - Data: txData, + Data: []byte(txData), Signature: signature, }) txHexHashExpected := hex.EncodeToString(hasher.Compute(string(marshalizedTx))) @@ -766,7 +762,7 @@ func TestCreateShardedStores_NilShardCoordinatorShouldError(t *testing.T) { n, _ := node.NewNode( node.WithMessenger(messenger), node.WithDataPool(dataPool), - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithAccountsAdapter(&mock.AccountsStub{}), @@ -785,7 +781,7 @@ func TestCreateShardedStores_NilDataPoolShouldError(t *testing.T) { n, _ := node.NewNode( node.WithMessenger(messenger), node.WithShardCoordinator(shardCoordinator), - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithAccountsAdapter(&mock.AccountsStub{}), @@ -805,14 +801,14 @@ func TestCreateShardedStores_NilTransactionDataPoolShouldError(t *testing.T) { dataPool.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return nil } - dataPool.HeadersCalled = func() storage.Cacher { - return &mock.CacherStub{} + dataPool.HeadersCalled = func() dataRetriever.HeadersPool { + return &mock.HeadersCacherStub{} } n, _ := node.NewNode( node.WithMessenger(messenger), node.WithShardCoordinator(shardCoordinator), node.WithDataPool(dataPool), - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(&mock.AddressConverterStub{}), 
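For orientation, the call shape after this change (data payload as []byte, no challenge argument) looks as follows. This is only a sketch that assumes a *node.Node already built with the options used in the tests above; the literal values are illustrative.

package example

import (
	"math/big"

	"github.com/ElrondNetwork/elrond-go/node"
)

// createTransactionSketch only demonstrates the updated CreateTransaction signature;
// the node is assumed to be constructed as in the tests above.
func createTransactionSketch(n *node.Node) error {
	nonce := uint64(0)
	value := big.NewInt(10)
	txData := []byte("-")   // data is now a byte slice instead of a string
	signature := "617eff4f" // hex-encoded signature, as in the tests above

	// the former challenge argument is gone from the parameter list
	_, err := n.CreateTransaction(nonce, value.String(), "rcv", "snd", uint64(10), uint64(20), txData, signature)
	return err
}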
node.WithAccountsAdapter(&mock.AccountsStub{}), @@ -832,14 +828,15 @@ func TestCreateShardedStores_NilHeaderDataPoolShouldError(t *testing.T) { dataPool.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{} } - dataPool.HeadersCalled = func() storage.Cacher { + + dataPool.HeadersCalled = func() dataRetriever.HeadersPool { return nil } n, _ := node.NewNode( node.WithMessenger(messenger), node.WithShardCoordinator(shardCoordinator), node.WithDataPool(dataPool), - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithAccountsAdapter(&mock.AccountsStub{}), @@ -864,18 +861,17 @@ func TestCreateShardedStores_ReturnsSuccessfully(t *testing.T) { txShardedData.CreateShardStoreCalled = func(cacherId string) { txShardedStores = append(txShardedStores, cacherId) } - headerShardedData := &mock.CacherStub{} dataPool.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return txShardedData } - dataPool.HeadersCalled = func() storage.Cacher { - return headerShardedData + dataPool.HeadersCalled = func() dataRetriever.HeadersPool { + return &mock.HeadersCacherStub{} } n, _ := node.NewNode( node.WithMessenger(messenger), node.WithShardCoordinator(shardCoordinator), node.WithDataPool(dataPool), - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithHasher(getHasher()), node.WithAddressConverter(&mock.AddressConverterStub{}), node.WithAccountsAdapter(&mock.AccountsStub{}), @@ -1038,7 +1034,7 @@ func TestNode_StartHeartbeatNilKeygenShouldErr(t *testing.T) { t.Parallel() n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithSingleSigner(&mock.SinglesignMock{}), node.WithMessenger(&mock.MessengerStub{ HasTopicCalled: func(name string) bool { @@ -1080,7 +1076,7 @@ func TestNode_StartHeartbeatHasTopicValidatorShouldErr(t *testing.T) { t.Parallel() n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithSingleSigner(&mock.SinglesignMock{}), node.WithKeyGen(&mock.KeyGenMock{}), node.WithMessenger(&mock.MessengerStub{ @@ -1114,7 +1110,7 @@ func TestNode_StartHeartbeatCreateTopicFailsShouldErr(t *testing.T) { errExpected := errors.New("expected error") n, _ := node.NewNode( - node.WithMarshalizer(getMarshalizer()), + node.WithMarshalizer(getMarshalizer(), testSizeCheckDelta), node.WithSingleSigner(&mock.SinglesignMock{}), node.WithKeyGen(&mock.KeyGenMock{}), node.WithMessenger(&mock.MessengerStub{ @@ -1154,7 +1150,7 @@ func TestNode_StartHeartbeatRegisterMessageProcessorFailsShouldErr(t *testing.T) errExpected := errors.New("expected error") n, _ := node.NewNode( - node.WithMarshalizer(&mock.MarshalizerMock{}), + node.WithMarshalizer(&mock.MarshalizerMock{}, testSizeCheckDelta), node.WithSingleSigner(&mock.SinglesignMock{}), node.WithKeyGen(&mock.KeyGenMock{}), node.WithMessenger(&mock.MessengerStub{ @@ -1203,7 +1199,7 @@ func TestNode_StartHeartbeatShouldWorkAndCallSendHeartbeat(t *testing.T) { MarshalHandler: func(obj interface{}) (bytes []byte, e error) { return buffData, nil }, - }), + }, testSizeCheckDelta), node.WithSingleSigner(&mock.SinglesignMock{}), node.WithKeyGen(&mock.KeyGenMock{}), node.WithMessenger(&mock.MessengerStub{ @@ -1264,7 +1260,7 @@ func 
TestNode_StartHeartbeatShouldWorkAndHaveAllPublicKeys(t *testing.T) { MarshalHandler: func(obj interface{}) (bytes []byte, e error) { return make([]byte, 0), nil }, - }), + }, testSizeCheckDelta), node.WithSingleSigner(&mock.SinglesignMock{}), node.WithKeyGen(&mock.KeyGenMock{}), node.WithMessenger(&mock.MessengerStub{ @@ -1323,7 +1319,7 @@ func TestNode_StartHeartbeatShouldSetNodesFromInitialPubKeysAsValidators(t *test MarshalHandler: func(obj interface{}) (bytes []byte, e error) { return make([]byte, 0), nil }, - }), + }, testSizeCheckDelta), node.WithSingleSigner(&mock.SinglesignMock{}), node.WithKeyGen(&mock.KeyGenMock{}), node.WithMessenger(&mock.MessengerStub{ @@ -1386,7 +1382,7 @@ func TestNode_StartHeartbeatShouldWorkAndCanCallProcessMessage(t *testing.T) { MarshalHandler: func(obj interface{}) (bytes []byte, e error) { return make([]byte, 0), nil }, - }), + }, testSizeCheckDelta), node.WithSingleSigner(&mock.SinglesignMock{}), node.WithKeyGen(&mock.KeyGenMock{}), node.WithMessenger(&mock.MessengerStub{ @@ -1597,7 +1593,6 @@ func TestNode_AppStatusHandlersShouldIncrement(t *testing.T) { metricKey := core.MetricCurrentRound incrementCalled := make(chan bool, 1) - // create a prometheus status handler which will be passed to the facade appStatusHandlerStub := mock.AppStatusHandlerStub{ IncrementHandler: func(key string) { incrementCalled <- true @@ -1623,7 +1618,6 @@ func TestNode_AppStatusHandlerShouldDecrement(t *testing.T) { metricKey := core.MetricCurrentRound decrementCalled := make(chan bool, 1) - // create a prometheus status handler which will be passed to the facade appStatusHandlerStub := mock.AppStatusHandlerStub{ DecrementHandler: func(key string) { decrementCalled <- true @@ -1649,7 +1643,6 @@ func TestNode_AppStatusHandlerShouldSetInt64Value(t *testing.T) { metricKey := core.MetricCurrentRound setInt64ValueCalled := make(chan bool, 1) - // create a prometheus status handler which will be passed to the facade appStatusHandlerStub := mock.AppStatusHandlerStub{ SetInt64ValueHandler: func(key string, value int64) { setInt64ValueCalled <- true @@ -1675,7 +1668,6 @@ func TestNode_AppStatusHandlerShouldSetUInt64Value(t *testing.T) { metricKey := core.MetricCurrentRound setUInt64ValueCalled := make(chan bool, 1) - // create a prometheus status handler which will be passed to the facade appStatusHandlerStub := mock.AppStatusHandlerStub{ SetUInt64ValueHandler: func(key string, value uint64) { setUInt64ValueCalled <- true @@ -1722,9 +1714,8 @@ func TestNode_SendBulkTransactionsMultiShardTxsShouldBeMappedCorrectly(t *testin SndAddr: []byte("senderShard0"), GasPrice: 5, GasLimit: 11, - Data: "", + Data: []byte(""), Signature: []byte("sig0"), - Challenge: nil, }) txsToSend = append(txsToSend, &transaction.Transaction{ @@ -1734,9 +1725,8 @@ func TestNode_SendBulkTransactionsMultiShardTxsShouldBeMappedCorrectly(t *testin SndAddr: []byte("senderShard0"), GasPrice: 6, GasLimit: 12, - Data: "", + Data: []byte(""), Signature: []byte("sig1"), - Challenge: nil, }) txsToSend = append(txsToSend, &transaction.Transaction{ @@ -1746,9 +1736,8 @@ func TestNode_SendBulkTransactionsMultiShardTxsShouldBeMappedCorrectly(t *testin SndAddr: []byte("senderShard1"), GasPrice: 7, GasLimit: 13, - Data: "", + Data: []byte(""), Signature: []byte("sig2"), - Challenge: nil, }) wg := sync.WaitGroup{} @@ -1815,7 +1804,7 @@ func TestNode_SendBulkTransactionsMultiShardTxsShouldBeMappedCorrectly(t *testin } sk, pk := keyGen.GeneratePair() n, _ := node.NewNode( - node.WithMarshalizer(marshalizer), + 
node.WithMarshalizer(marshalizer, testSizeCheckDelta), node.WithHasher(&mock.HasherMock{}), node.WithAddressConverter(addrConverter), node.WithAccountsAdapter(accAdapter), diff --git a/node/options.go b/node/options.go index 3a89d240f8a..46d358c0ee8 100644 --- a/node/options.go +++ b/node/options.go @@ -14,6 +14,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/epochStart" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/ntp" @@ -33,11 +34,12 @@ func WithMessenger(mes P2PMessenger) Option { } // WithMarshalizer sets up the marshalizer option for the Node -func WithMarshalizer(marshalizer marshal.Marshalizer) Option { +func WithMarshalizer(marshalizer marshal.Marshalizer, sizeCheckDelta uint32) Option { return func(n *Node) error { if marshalizer == nil || marshalizer.IsInterfaceNil() { return ErrNilMarshalizer } + n.sizeCheckDelta = sizeCheckDelta n.marshalizer = marshalizer return nil } @@ -403,6 +405,17 @@ func WithBootstrapRoundIndex(bootstrapRoundIndex uint64) Option { } } +// WithEpochStartTrigger sets up an start of epoch trigger option for the node +func WithEpochStartTrigger(epochStartTrigger epochStart.TriggerHandler) Option { + return func(n *Node) error { + if check.IfNil(epochStartTrigger) { + return ErrNilEpochStartTrigger + } + n.epochStartTrigger = epochStartTrigger + return nil + } +} + // WithAppStatusHandler sets up which handler will monitor the status of the node func WithAppStatusHandler(aph core.AppStatusHandler) Option { return func(n *Node) error { @@ -489,3 +502,14 @@ func WithChainID(chainID []byte) Option { return nil } } + +// WithBlockTracker sets up the block tracker for the Node +func WithBlockTracker(blockTracker process.BlockTracker) Option { + return func(n *Node) error { + if check.IfNil(blockTracker) { + return ErrNilBlockTracker + } + n.blockTracker = blockTracker + return nil + } +} diff --git a/node/options_test.go b/node/options_test.go index e8cb14552c5..0dc2ad9ab48 100644 --- a/node/options_test.go +++ b/node/options_test.go @@ -11,6 +11,8 @@ import ( "github.com/stretchr/testify/assert" ) +const testSizeCheckDelta = 100 + func TestWithMessenger_NilMessengerShouldErr(t *testing.T) { t.Parallel() @@ -42,7 +44,7 @@ func TestWithMarshalizer_NilMarshalizerShouldErr(t *testing.T) { node, _ := NewNode() - opt := WithMarshalizer(nil) + opt := WithMarshalizer(nil, testSizeCheckDelta) err := opt(node) assert.Nil(t, node.marshalizer) @@ -56,10 +58,11 @@ func TestWithMarshalizer_ShouldWork(t *testing.T) { marshalizer := &mock.MarshalizerMock{} - opt := WithMarshalizer(marshalizer) + opt := WithMarshalizer(marshalizer, testSizeCheckDelta) err := opt(node) assert.True(t, node.marshalizer == marshalizer) + assert.True(t, node.sizeCheckDelta == testSizeCheckDelta) assert.Nil(t, err) } @@ -544,6 +547,32 @@ func TestWithShardCoordinator_ShouldWork(t *testing.T) { assert.Nil(t, err) } +func TestWithBlockTracker_NilBlockTrackerShouldErr(t *testing.T) { + t.Parallel() + + node, _ := NewNode() + + opt := WithBlockTracker(nil) + err := opt(node) + + assert.Nil(t, node.blockTracker) + assert.Equal(t, ErrNilBlockTracker, err) +} + +func TestWithBlockTracker_ShouldWork(t *testing.T) { + t.Parallel() + + node, _ := NewNode() + + blockTracker := &mock.BlockTrackerStub{} + + opt := WithBlockTracker(blockTracker) + err := opt(node) + + 
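The node options touched above all share the functional-options shape, so the extra sizeCheckDelta argument and the check.IfNil guards slot into the same pattern. Below is a minimal, self-contained sketch of that pattern on simplified stand-in types (exampleNode, Marshalizer and the error value are illustrative, not the repository's own definitions).

package example

import "errors"

var errNilMarshalizer = errors.New("nil marshalizer")

// Marshalizer is a stand-in for marshal.Marshalizer.
type Marshalizer interface {
	Marshal(obj interface{}) ([]byte, error)
	Unmarshal(obj interface{}, buff []byte) error
}

// exampleNode is a simplified stand-in for node.Node.
type exampleNode struct {
	marshalizer    Marshalizer
	sizeCheckDelta uint32
}

// Option mirrors the functional-option signature used throughout node/options.go.
type Option func(n *exampleNode) error

// WithMarshalizer now captures the size-check delta alongside the marshalizer itself.
func WithMarshalizer(m Marshalizer, sizeCheckDelta uint32) Option {
	return func(n *exampleNode) error {
		if m == nil {
			return errNilMarshalizer
		}
		n.sizeCheckDelta = sizeCheckDelta
		n.marshalizer = m
		return nil
	}
}

// newExampleNode applies each option in order and fails fast on the first error,
// which is why a nil dependency surfaces as a constructor error rather than a panic later.
func newExampleNode(opts ...Option) (*exampleNode, error) {
	n := &exampleNode{}
	for _, opt := range opts {
		if err := opt(n); err != nil {
			return nil, err
		}
	}
	return n, nil
}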
assert.True(t, node.blockTracker == blockTracker) + assert.Nil(t, err) +} + func TestWithNodesCoordinator_NilNodesCoordinatorShouldErr(t *testing.T) { t.Parallel() diff --git a/p2p/libp2p/discovery/kadDhtDiscoverer.go b/p2p/libp2p/discovery/kadDhtDiscoverer.go index 419534bcb12..a0ff8816bd0 100644 --- a/p2p/libp2p/discovery/kadDhtDiscoverer.go +++ b/p2p/libp2p/discovery/kadDhtDiscoverer.go @@ -15,9 +15,6 @@ const ( initReconnectMul = 20 ) -var peerDiscoveryTimeout = 10 * time.Second -var noOfQueries = 1 - const kadDhtName = "kad-dht discovery" var log = logger.GetOrCreate("p2p/libp2p/kaddht") @@ -93,12 +90,6 @@ func (kdd *KadDhtDiscoverer) connectToInitialAndBootstrap() { kdd.refreshInterval, kdd.initialPeersList) - cfg := dht.BootstrapConfig{ - Period: kdd.refreshInterval, - Queries: noOfQueries, - Timeout: peerDiscoveryTimeout, - } - ctx := kdd.contextProvider.Context() go func() { @@ -109,7 +100,7 @@ func (kdd *KadDhtDiscoverer) connectToInitialAndBootstrap() { i := 1 for { if kdd.initConns { - err := kdd.kadDHT.BootstrapOnce(ctx, cfg) + err := kdd.kadDHT.Bootstrap(ctx) if err == kbucket.ErrLookupFailure { <-kdd.ReconnectToNetwork() } @@ -122,7 +113,7 @@ func (kdd *KadDhtDiscoverer) connectToInitialAndBootstrap() { } } select { - case <-time.After(cfg.Period): + case <-time.After(kdd.refreshInterval): case <-ctx.Done(): return } @@ -224,8 +215,5 @@ func (kdd *KadDhtDiscoverer) IsDiscoveryPaused() bool { // IsInterfaceNil returns true if there is no value under the interface func (kdd *KadDhtDiscoverer) IsInterfaceNil() bool { - if kdd == nil { - return true - } - return false + return kdd == nil } diff --git a/p2p/libp2p/discovery/nullDiscoverer.go b/p2p/libp2p/discovery/nullDiscoverer.go index 81774cebcc7..9da07d98636 100644 --- a/p2p/libp2p/discovery/nullDiscoverer.go +++ b/p2p/libp2p/discovery/nullDiscoverer.go @@ -26,14 +26,11 @@ func (nd *NullDiscoverer) Name() string { } // ApplyContext is an empty func as the context is not required -func (nd *NullDiscoverer) ApplyContext(ctxProvider p2p.ContextProvider) error { +func (nd *NullDiscoverer) ApplyContext(_ p2p.ContextProvider) error { return nil } // IsInterfaceNil returns true if there is no value under the interface func (nd *NullDiscoverer) IsInterfaceNil() bool { - if nd == nil { - return true - } - return false + return nd == nil } diff --git a/p2p/libp2p/libp2pConnectionMonitor.go b/p2p/libp2p/libp2pConnectionMonitor.go index a37d203b819..869845f0ab2 100644 --- a/p2p/libp2p/libp2pConnectionMonitor.go +++ b/p2p/libp2p/libp2pConnectionMonitor.go @@ -65,7 +65,7 @@ func (lcm *libp2pConnectionMonitor) doReconn() { } // Connected is called when a connection opened -func (lcm *libp2pConnectionMonitor) Connected(netw network.Network, conn network.Conn) { +func (lcm *libp2pConnectionMonitor) Connected(netw network.Network, _ network.Conn) { if len(netw.Conns()) > lcm.thresholdDiscoveryPause { lcm.reconnecter.Pause() } @@ -79,7 +79,7 @@ func (lcm *libp2pConnectionMonitor) Connected(netw network.Network, conn network } // Disconnected is called when a connection closed -func (lcm *libp2pConnectionMonitor) Disconnected(netw network.Network, conn network.Conn) { +func (lcm *libp2pConnectionMonitor) Disconnected(netw network.Network, _ network.Conn) { lcm.doReconnectionIfNeeded(netw) if len(netw.Conns()) < lcm.thresholdDiscoveryResume && lcm.reconnecter != nil { diff --git a/p2p/libp2p/netMessenger.go b/p2p/libp2p/netMessenger.go index 37554d34347..81cd851167f 100644 --- a/p2p/libp2p/netMessenger.go +++ b/p2p/libp2p/netMessenger.go @@ -6,6 
+6,7 @@ import ( "sync" "time" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/core/throttler" "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/p2p" @@ -18,7 +19,7 @@ import ( "github.com/libp2p/go-libp2p-pubsub" ) -const durationBetweenSends = time.Microsecond * 10 +var sendTimeout = time.Microsecond * 100 // ListenAddrWithIp4AndTcp defines the listening address with ip v.4 and TCP const ListenAddrWithIp4AndTcp = "/ip4/0.0.0.0/tcp/" @@ -53,7 +54,8 @@ type networkMessenger struct { connMonitor *libp2pConnectionMonitor peerDiscoverer p2p.PeerDiscoverer mutTopics sync.RWMutex - topics map[string]p2p.MessageProcessor + topics map[string]*pubsub.Topic + processors map[string]p2p.MessageProcessor outgoingPLB p2p.ChannelLoadBalancer poc *peersOnChannel goRoutinesThrottler *throttler.NumGoRoutineThrottler @@ -96,7 +98,7 @@ func NewNetworkMessenger( libp2p.DefaultSecurity, libp2p.ConnectionManager(conMgr), libp2p.DefaultTransports, - //TODO investigate if the DisableRelay is really needed and why + //we need the disable relay option in order to save the node's bandwidth as much as possible libp2p.DisableRelay(), libp2p.NATPortMap(), } @@ -152,7 +154,8 @@ func createMessenger( netMes := networkMessenger{ ctxProvider: lctx, pb: pb, - topics: make(map[string]p2p.MessageProcessor), + topics: make(map[string]*pubsub.Topic), + processors: make(map[string]p2p.MessageProcessor), outgoingPLB: outgoingPLB, peerDiscoverer: peerDiscoverer, } @@ -178,14 +181,7 @@ func createMessenger( go func(pubsub *pubsub.PubSub, plb p2p.ChannelLoadBalancer) { for { - sendableData := plb.CollectOneElementFromChannels() - - if sendableData == nil { - continue - } - - _ = pb.Publish(sendableData.Topic, sendableData.Buff) - time.Sleep(durationBetweenSends) + netMes.sendMessage() } }(pb, netMes.outgoingPLB) @@ -214,6 +210,32 @@ func createPubSub(ctxProvider *Libp2pContext, withSigning bool) (*pubsub.PubSub, return ps, nil } +func (netMes *networkMessenger) sendMessage() { + sendableData := netMes.outgoingPLB.CollectOneElementFromChannels() + if sendableData == nil { + return + } + + netMes.mutTopics.RLock() + topic := netMes.topics[sendableData.Topic] + netMes.mutTopics.RUnlock() + + if topic == nil { + log.Debug("topic not joined", + "topic", sendableData.Topic) + return + } + + ctx, cancelFunc := context.WithTimeout(context.Background(), sendTimeout) + defer cancelFunc() + + err := topic.Publish(ctx, sendableData.Buff) + if err != nil { + log.Trace("error sending data", + "error", err) + } +} + // Close closes the host, connections and streams func (netMes *networkMessenger) Close() error { return netMes.ctxProvider.Host().Close() @@ -354,11 +376,18 @@ func (netMes *networkMessenger) CreateTopic(name string, createChannelForTopic b } netMes.topics[name] = nil - subscrRequest, err := netMes.pb.Subscribe(name) + topic, err := netMes.pb.Join(name) if err != nil { netMes.mutTopics.Unlock() return err } + subscrRequest, err := topic.Subscribe() + if err != nil { + netMes.mutTopics.Unlock() + return err + } + + netMes.topics[name] = topic netMes.mutTopics.Unlock() if createChannelForTopic { @@ -387,7 +416,7 @@ func (netMes *networkMessenger) HasTopic(name string) bool { // HasTopicValidator returns true if the topic has a validator set func (netMes *networkMessenger) HasTopicValidator(name string) bool { netMes.mutTopics.RLock() - validator, _ := netMes.topics[name] + validator := netMes.processors[name] netMes.mutTopics.RUnlock() return validator != nil @@ -437,16 
+466,18 @@ func (netMes *networkMessenger) Broadcast(topic string, buff []byte) { // RegisterMessageProcessor registers a message process on a topic func (netMes *networkMessenger) RegisterMessageProcessor(topic string, handler p2p.MessageProcessor) error { - if handler == nil || handler.IsInterfaceNil() { + if check.IfNil(handler) { return p2p.ErrNilValidator } netMes.mutTopics.Lock() defer netMes.mutTopics.Unlock() - validator, found := netMes.topics[topic] + + _, found := netMes.topics[topic] if !found { return p2p.ErrNilTopic } + validator := netMes.processors[topic] if validator != nil { return p2p.ErrTopicValidatorOperationNotSupported } @@ -467,7 +498,7 @@ func (netMes *networkMessenger) RegisterMessageProcessor(topic string, handler p return err } - netMes.topics[topic] = handler + netMes.processors[topic] = handler return nil } @@ -475,12 +506,12 @@ func (netMes *networkMessenger) RegisterMessageProcessor(topic string, handler p func (netMes *networkMessenger) UnregisterMessageProcessor(topic string) error { netMes.mutTopics.Lock() defer netMes.mutTopics.Unlock() - validator, found := netMes.topics[topic] + _, found := netMes.topics[topic] if !found { return p2p.ErrNilTopic } - + validator := netMes.processors[topic] if validator == nil { return p2p.ErrTopicValidatorOperationNotSupported } @@ -491,6 +522,7 @@ func (netMes *networkMessenger) UnregisterMessageProcessor(topic string) error { } netMes.topics[topic] = nil + return nil } @@ -503,7 +535,7 @@ func (netMes *networkMessenger) directMessageHandler(message p2p.MessageP2P) err var processor p2p.MessageProcessor netMes.mutTopics.RLock() - processor = netMes.topics[message.TopicIDs()[0]] + processor = netMes.processors[message.TopicIDs()[0]] netMes.mutTopics.RUnlock() if processor == nil { diff --git a/p2p/libp2p/netMessenger_test.go b/p2p/libp2p/netMessenger_test.go index 39cfec686b8..e7c0d38169e 100644 --- a/p2p/libp2p/netMessenger_test.go +++ b/p2p/libp2p/netMessenger_test.go @@ -292,11 +292,6 @@ func TestNewNetworkMessenger_NoConnMgrShouldWork(t *testing.T) { } func TestNewNetworkMessenger_WithConnMgrShouldWork(t *testing.T) { - //TODO remove skip when external library is concurrent safe - if testing.Short() { - t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") - } - port := 4000 _, sk := createLibP2PCredentialsMessenger() @@ -331,11 +326,6 @@ func TestNewNetworkMessenger_WithConnMgrShouldWork(t *testing.T) { } func TestNewNetworkMessenger_WithNullPeerDiscoveryShouldWork(t *testing.T) { - //TODO remove skip when github.com/koron/go-ssdp library is concurrent safe - if testing.Short() { - t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") - } - port := 4000 _, sk := createLibP2PCredentialsMessenger() @@ -388,11 +378,6 @@ func TestNewNetworkMessenger_NilPeerDiscoveryShouldErr(t *testing.T) { } func TestNewNetworkMessenger_PeerDiscovererFailsWhenApplyingContextShouldErr(t *testing.T) { - //TODO remove skip when github.com/koron/go-ssdp library is concurrent safe - if testing.Short() { - t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") - } - port := 4000 _, sk := createLibP2PCredentialsMessenger() @@ -424,11 +409,6 @@ func TestNewNetworkMessenger_PeerDiscovererFailsWhenApplyingContextShouldErr(t * } func TestNewNetworkMessengerWithPortSweep_ShouldFindFreePort(t *testing.T) { - //TODO remove skip when github.com/koron/go-ssdp library is concurrent safe - if testing.Short() { - t.Skip("this test fails with 
race detector on because of the github.com/koron/go-ssdp lib") - } - _, sk := createLibP2PCredentialsMessenger() mes, err := libp2p.NewNetworkMessengerOnFreePort( @@ -651,11 +631,6 @@ func TestLibp2pMessenger_UnregisterTopicValidatorShouldWork(t *testing.T) { } func TestLibp2pMessenger_BroadcastDataLargeMessageShouldNotCallSend(t *testing.T) { - //TODO remove skip when github.com/koron/go-ssdp library is concurrent safe - if testing.Short() { - t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") - } - msg := make([]byte, libp2p.MaxSendBuffSize+1) _, sk := createLibP2PCredentialsMessenger() @@ -683,11 +658,6 @@ func TestLibp2pMessenger_BroadcastDataLargeMessageShouldNotCallSend(t *testing.T } func TestLibp2pMessenger_BroadcastDataBetween2PeersShouldWork(t *testing.T) { - //TODO remove skip when github.com/koron/go-ssdp library is concurrent safe - if testing.Short() { - t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") - } - msg := []byte("test message") _, mes1, mes2 := createMockNetworkOf2() @@ -724,14 +694,9 @@ func TestLibp2pMessenger_BroadcastDataBetween2PeersShouldWork(t *testing.T) { } func TestLibp2pMessenger_BroadcastOnChannelBlockingShouldLimitNumberOfGoRoutines(t *testing.T) { - //TODO remove skip when github.com/koron/go-ssdp library is concurrent safe - if testing.Short() { - t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") - } - port := 4000 msg := []byte("test message") - numBroadcasts := 10000 + numBroadcasts := 2 * libp2p.BroadcastGoRoutines _, sk := createLibP2PCredentialsMessenger() ch := make(chan *p2p.SendableData) @@ -743,7 +708,6 @@ func TestLibp2pMessenger_BroadcastOnChannelBlockingShouldLimitNumberOfGoRoutines nil, &mock.ChannelLoadBalancerStub{ CollectOneElementFromChannelsCalled: func() *p2p.SendableData { - time.Sleep(time.Millisecond * 100) return nil }, GetChannelOrDefaultCalled: func(pipe string) chan *p2p.SendableData { @@ -755,40 +719,33 @@ func TestLibp2pMessenger_BroadcastOnChannelBlockingShouldLimitNumberOfGoRoutines 0, ) - wg := sync.WaitGroup{} - wg.Add(numBroadcasts - libp2p.BroadcastGoRoutines) + numErrors := uint32(0) + chDone := make(chan struct{}) + go func() { + for atomic.LoadUint32(&numErrors) != uint32(numBroadcasts-libp2p.BroadcastGoRoutines) { + time.Sleep(time.Millisecond) + } + + chDone <- struct{}{} + }() + for i := 0; i < numBroadcasts; i++ { go func() { err := mes.BroadcastOnChannelBlocking("test", "test", msg) - if err != nil { - wg.Done() + if err == p2p.ErrTooManyGoroutines { + atomic.AddUint32(&numErrors, 1) } }() } - wg.Wait() - - assert.True(t, libp2p.BroadcastGoRoutines >= emptyChannel(ch)) -} - -func emptyChannel(ch chan *p2p.SendableData) int { - readsCnt := 0 - for { - select { - case <-ch: - readsCnt++ - default: - return readsCnt - } + select { + case <-chDone: + case <-time.After(timeout): + assert.Fail(t, "timout waiting for go routines to finish or number of errors received mismatched") } } func TestLibp2pMessenger_BroadcastDataBetween2PeersWithLargeMsgShouldWork(t *testing.T) { - //TODO remove skip when github.com/koron/go-ssdp library is concurrent safe - if testing.Short() { - t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") - } - msg := make([]byte, libp2p.MaxSendBuffSize) _, mes1, mes2 := createMockNetworkOf2() @@ -825,11 +782,6 @@ func TestLibp2pMessenger_BroadcastDataBetween2PeersWithLargeMsgShouldWork(t *tes } func 
TestLibp2pMessenger_BroadcastDataOnTopicPipeBetween2PeersShouldWork(t *testing.T) { - //TODO remove skip when github.com/koron/go-ssdp library is concurrent safe - if testing.Short() { - t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") - } - msg := []byte("test message") _, mes1, mes2 := createMockNetworkOf2() @@ -1251,11 +1203,6 @@ func generateConnWithRemotePeer(pid p2p.PeerID) network.Conn { } func TestLibp2pMessenger_TrimConnectionsCallsConnManagerTrimConnections(t *testing.T) { - //TODO remove skip when github.com/koron/go-ssdp library is concurrent safe - if testing.Short() { - t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") - } - port := 4000 _, sk := createLibP2PCredentialsMessenger() @@ -1297,11 +1244,6 @@ func TestLibp2pMessenger_TrimConnectionsCallsConnManagerTrimConnections(t *testi } func TestLibp2pMessenger_SendDataThrottlerShouldReturnCorrectObject(t *testing.T) { - //TODO remove skip when external library is concurrent safe - if testing.Short() { - t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") - } - port := 4000 _, sk := createLibP2PCredentialsMessenger() @@ -1401,6 +1343,11 @@ func TestLibp2pMessenger_SendDirectShouldNotBroadcastIfMessageIsPartiallyInvalid } func TestLibp2pMessenger_SendDirectWithMockNetToConnectedPeerShouldWork(t *testing.T) { + //TODO remove skip when github.com/koron/go-ssdp library is concurrent safe + if testing.Short() { + t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") + } + msg := []byte("test message") _, mes1, mes2 := createMockNetworkOf2() @@ -1438,11 +1385,6 @@ func TestLibp2pMessenger_SendDirectWithMockNetToConnectedPeerShouldWork(t *testi } func TestLibp2pMessenger_SendDirectWithRealNetToConnectedPeerShouldWork(t *testing.T) { - //TODO remove skip when external library is concurrent safe - if testing.Short() { - t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") - } - msg := []byte("test message") _, sk1 := createLibP2PCredentialsMessenger() diff --git a/p2p/libp2p/networksharding/kadSharder_test.go b/p2p/libp2p/networksharding/kadSharder_test.go index 32df1464271..32a080225ce 100644 --- a/p2p/libp2p/networksharding/kadSharder_test.go +++ b/p2p/libp2p/networksharding/kadSharder_test.go @@ -15,7 +15,7 @@ const ( testNodesCount = 1000 ) -func fakeShard0(id p2p.PeerID) uint32 { +func fakeShard0(_ p2p.PeerID) uint32 { return 0 } diff --git a/p2p/libp2p/networksharding/noSharder.go b/p2p/libp2p/networksharding/noSharder.go index 87730473832..a2d58e84cc5 100644 --- a/p2p/libp2p/networksharding/noSharder.go +++ b/p2p/libp2p/networksharding/noSharder.go @@ -11,7 +11,7 @@ type noSharder struct { } // GetShard always 0 -func (ns *noSharder) GetShard(id peer.ID) uint32 { +func (ns *noSharder) GetShard(_ peer.ID) uint32 { return 0 } diff --git a/p2p/libp2p/networksharding/sharder.go b/p2p/libp2p/networksharding/sharder.go index 66c447514ff..23832eac8ba 100644 --- a/p2p/libp2p/networksharding/sharder.go +++ b/p2p/libp2p/networksharding/sharder.go @@ -6,7 +6,7 @@ import ( "sync" "github.com/libp2p/go-libp2p-core/peer" - sha256 "github.com/minio/sha256-simd" + "github.com/minio/sha256-simd" ) var ( diff --git a/p2p/memp2p/export_test.go b/p2p/memp2p/export_test.go new file mode 100644 index 00000000000..d779ae8e0ee --- /dev/null +++ b/p2p/memp2p/export_test.go @@ -0,0 +1,11 @@ +package memp2p + +import "github.com/ElrondNetwork/elrond-go/p2p" 
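The kadDhtDiscoverer hunk above drops the one-shot BootstrapOnce/BootstrapConfig call in favour of re-running Bootstrap on every refresh interval. Below is a condensed, stand-alone sketch of that loop structure; the bootstrapper interface, errLookupFailure and the reconnect callback are stand-ins for the kad-dht and reconnecter types, not the real ones, and the initConns gate is omitted for brevity.

package example

import (
	"context"
	"errors"
	"time"
)

// errLookupFailure stands in for kbucket.ErrLookupFailure.
var errLookupFailure = errors.New("lookup failure")

// bootstrapper is a stand-in for the subset of the kad-dht API the loop needs.
type bootstrapper interface {
	Bootstrap(ctx context.Context) error
}

// bootstrapLoop re-runs Bootstrap on every refresh interval and falls back to the
// reconnect channel when a lookup failure is reported, mirroring the structure of
// connectToInitialAndBootstrap above.
func bootstrapLoop(ctx context.Context, kadDHT bootstrapper, refreshInterval time.Duration, reconnect func() <-chan struct{}) {
	for {
		err := kadDHT.Bootstrap(ctx)
		if errors.Is(err, errLookupFailure) {
			// wait until the reconnecter signals before bootstrapping again
			<-reconnect()
		}

		select {
		case <-time.After(refreshInterval):
		case <-ctx.Done():
			return
		}
	}
}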
+ +func (messenger *Messenger) TopicValidator(name string) p2p.MessageProcessor { + messenger.topicsMutex.RLock() + processor := messenger.topicValidators[name] + messenger.topicsMutex.RUnlock() + + return processor +} diff --git a/p2p/memp2p/memp2p_test.go b/p2p/memp2p/memp2p_test.go deleted file mode 100644 index 5e78436285c..00000000000 --- a/p2p/memp2p/memp2p_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package memp2p_test - -import ( - "testing" - "time" - - "github.com/ElrondNetwork/elrond-go/p2p" - "github.com/ElrondNetwork/elrond-go/p2p/memp2p" - "github.com/ElrondNetwork/elrond-go/p2p/mock" - "github.com/stretchr/testify/assert" -) - -func TestInitializingNetworkAndPeer(t *testing.T) { - network, err := memp2p.NewNetwork() - assert.Nil(t, err) - - peer, err := memp2p.NewMessenger(network) - assert.Nil(t, err) - - assert.Equal(t, 1, len(network.Peers())) - assert.Equal(t, "Peer1", string(peer.ID())) - - assert.Equal(t, 1, len(peer.Addresses())) - assert.Equal(t, "/memp2p/Peer1", peer.Addresses()[0]) - - err = peer.Close() - assert.Nil(t, err) -} - -func TestInitializingNetworkwith4Peers(t *testing.T) { - network, _ := memp2p.NewNetwork() - - peer1, _ := memp2p.NewMessenger(network) - _, _ = memp2p.NewMessenger(network) - _, _ = memp2p.NewMessenger(network) - peer4, _ := memp2p.NewMessenger(network) - - assert.Equal(t, 4, len(network.Peers())) - assert.Equal(t, 4, len(peer4.Peers())) - - expectedAddresses := []string{"/memp2p/Peer1", "/memp2p/Peer2", "/memp2p/Peer3", "/memp2p/Peer4"} - assert.Equal(t, expectedAddresses, network.ListAddresses()) - - _ = peer1.Close() - peerIDs := network.PeerIDs() - peersMap := network.Peers() - assert.Equal(t, 3, len(peerIDs)) - assert.Equal(t, 3, len(peersMap)) - assert.Equal(t, p2p.PeerID("Peer2"), peerIDs[0]) - assert.Equal(t, p2p.PeerID("Peer3"), peerIDs[1]) - assert.Equal(t, p2p.PeerID("Peer4"), peerIDs[2]) - assert.NotContains(t, peersMap, "Peer1") - - expectedAddresses = []string{"/memp2p/Peer2", "/memp2p/Peer3", "/memp2p/Peer4"} - assert.Equal(t, expectedAddresses, network.ListAddresses()) -} - -func TestRegisteringTopics(t *testing.T) { - network, err := memp2p.NewNetwork() - assert.Nil(t, err) - - messenger, err := memp2p.NewMessenger(network) - assert.Nil(t, err) - - processor := &mock.MockMessageProcessor{} - - // Cannot register a MessageProcessor to a topic that doesn't exist. - err = messenger.RegisterMessageProcessor("rocket", processor) - assert.Equal(t, p2p.ErrNilTopic, err) - - // Create a proper topic. - assert.False(t, messenger.HasTopic("rocket")) - assert.Nil(t, messenger.CreateTopic("rocket", false)) - assert.True(t, messenger.HasTopic("rocket")) - - // The newly created topic has no MessageProcessor attached to it, so we - // attach one now. - assert.Nil(t, messenger.Topics["rocket"]) - err = messenger.RegisterMessageProcessor("rocket", processor) - assert.Nil(t, err) - assert.Equal(t, processor, messenger.Topics["rocket"]) - - // Cannot unregister a MessageProcessor from a topic that doesn't exist. - err = messenger.UnregisterMessageProcessor("albatross") - assert.Equal(t, p2p.ErrNilTopic, err) - - // Cannot unregister a MessageProcessor from a topic that doesn't have a - // MessageProcessor, even if the topic itself exists. - err = messenger.CreateTopic("nitrous_oxide", false) - assert.Nil(t, err) - err = messenger.UnregisterMessageProcessor("nitrous_oxide") - assert.Equal(t, p2p.ErrTopicValidatorOperationNotSupported, err) - - // Unregister the MessageProcessor from a topic that exists and has a - // MessageProcessor. 
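The netMessenger changes earlier in this diff replace the sleep-based publish loop with a per-send context timeout against the joined pubsub topic. The sketch below captures just that send step with simplified stand-in types (sendableData and publishableTopic are illustrative; the real code also guards the topics map with an RWMutex).

package example

import (
	"context"
	"log"
	"time"
)

// sendableData mirrors the shape of p2p.SendableData.
type sendableData struct {
	Topic string
	Buff  []byte
}

// publishableTopic is a stand-in for a joined *pubsub.Topic.
type publishableTopic interface {
	Publish(ctx context.Context, data []byte) error
}

// sendOne publishes a single queued element on its joined topic, bounded by sendTimeout,
// mirroring networkMessenger.sendMessage above.
func sendOne(data *sendableData, topics map[string]publishableTopic, sendTimeout time.Duration) {
	if data == nil {
		return
	}

	topic := topics[data.Topic]
	if topic == nil {
		log.Printf("topic not joined: %s", data.Topic)
		return
	}

	ctx, cancel := context.WithTimeout(context.Background(), sendTimeout)
	defer cancel()

	if err := topic.Publish(ctx, data.Buff); err != nil {
		log.Printf("error sending data: %v", err)
	}
}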
- err = messenger.UnregisterMessageProcessor("rocket") - assert.Nil(t, err) - assert.True(t, messenger.HasTopic("rocket")) - assert.Nil(t, messenger.Topics["rocket"]) - - // Disallow creating duplicate topics. - err = messenger.CreateTopic("more_rockets", false) - assert.Nil(t, err) - assert.True(t, messenger.HasTopic("more_rockets")) - err = messenger.CreateTopic("more_rockets", false) - assert.NotNil(t, err) -} - -func TestBroadcastingMessages(t *testing.T) { - network, _ := memp2p.NewNetwork() - network.LogMessages = true - - peer1, _ := memp2p.NewMessenger(network) - peer2, _ := memp2p.NewMessenger(network) - peer3, _ := memp2p.NewMessenger(network) - peer4, _ := memp2p.NewMessenger(network) - - // All peers listen to the topic "rocket" - _ = peer1.CreateTopic("rocket", false) - _ = peer1.RegisterMessageProcessor("rocket", mock.NewMockMessageProcessor(peer1.ID())) - _ = peer2.CreateTopic("rocket", false) - _ = peer2.RegisterMessageProcessor("rocket", mock.NewMockMessageProcessor(peer2.ID())) - _ = peer3.CreateTopic("rocket", false) - _ = peer3.RegisterMessageProcessor("rocket", mock.NewMockMessageProcessor(peer3.ID())) - _ = peer4.CreateTopic("rocket", false) - _ = peer4.RegisterMessageProcessor("rocket", mock.NewMockMessageProcessor(peer4.ID())) - - // Send a message to everybody. - _ = peer1.BroadcastOnChannelBlocking("rocket", "rocket", []byte("launch the rocket")) - time.Sleep(1 * time.Second) - assert.Equal(t, 4, network.GetMessageCount()) - - // Send a message after disconnecting. No new messages should appear in the log. - err := peer1.Close() - assert.Nil(t, err) - _ = peer1.BroadcastOnChannelBlocking("rocket", "rocket", []byte("launch the rocket again")) - time.Sleep(1 * time.Second) - assert.Equal(t, 4, network.GetMessageCount()) - - peer2.Broadcast("rocket", []byte("launch another rocket")) - time.Sleep(1 * time.Second) - assert.Equal(t, 7, network.GetMessageCount()) - - peer3.Broadcast("nitrous_oxide", []byte("this is not a rocket")) - time.Sleep(1 * time.Second) - assert.Equal(t, 7, network.GetMessageCount()) -} - -func TestConnectivityAndTopics(t *testing.T) { - network, _ := memp2p.NewNetwork() - network.LogMessages = true - - // Create 4 peers on the network, all listening to the topic "rocket". - for i := 1; i <= 4; i++ { - peer, _ := memp2p.NewMessenger(network) - _ = peer.CreateTopic("rocket", false) - processor := mock.NewMockMessageProcessor(peer.ID()) - _ = peer.RegisterMessageProcessor("rocket", processor) - } - - // Peers 2 and 3 also listen on the topic "carbohydrate" - peer2 := network.Peers()["Peer2"] - peer3 := network.Peers()["Peer3"] - _ = peer2.CreateTopic("carbohydrate", false) - _ = peer2.RegisterMessageProcessor("carbohydrate", mock.NewMockMessageProcessor(peer2.ID())) - _ = peer3.CreateTopic("carbohydrate", false) - _ = peer3.RegisterMessageProcessor("carbohydrate", mock.NewMockMessageProcessor(peer3.ID())) - - peers1234 := []p2p.PeerID{"Peer1", "Peer2", "Peer3", "Peer4"} - peers234 := []p2p.PeerID{"Peer2", "Peer3", "Peer4"} - peers23 := []p2p.PeerID{"Peer2", "Peer3"} - - // Test to which peers is Peer1 connected, based on the topics they listen to. 
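The rewritten go-routine throttling test earlier in this diff waits on an atomic error counter with an explicit timeout instead of a WaitGroup. Below is a small, self-contained version of that wait pattern (names are illustrative); callers bump the counter whenever the throttled broadcast returns the too-many-goroutines error.

package example

import (
	"sync/atomic"
	"testing"
	"time"
)

// waitForCounter polls an atomic counter until it reaches target or the timeout expires,
// mirroring the structure of the rewritten BroadcastOnChannelBlocking test above.
func waitForCounter(t *testing.T, counter *uint32, target uint32, timeout time.Duration) {
	chDone := make(chan struct{})
	go func() {
		for atomic.LoadUint32(counter) != target {
			time.Sleep(time.Millisecond)
		}
		chDone <- struct{}{}
	}()

	select {
	case <-chDone:
	case <-time.After(timeout):
		t.Fatal("timeout waiting for goroutines to finish")
	}
}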
- peer1 := network.Peers()["Peer1"] - assert.Equal(t, peers1234, network.PeerIDs()) - assert.Equal(t, peers234, peer1.ConnectedPeers()) - assert.Equal(t, peers234, peer1.ConnectedPeersOnTopic("rocket")) - assert.Equal(t, peers23, peer1.ConnectedPeersOnTopic("carbohydrate")) -} - -func TestSendingDirectMessages(t *testing.T) { - network, _ := memp2p.NewNetwork() - network.LogMessages = true - - peer1, _ := memp2p.NewMessenger(network) - peer2, _ := memp2p.NewMessenger(network) - - var err error - - // Peer1 attempts to send a direct message to Peer2 on topic "rocket", but - // Peer2 is not listening to this topic. - err = peer1.SendToConnectedPeer("rocket", []byte("try to launch this rocket"), "Peer2") - assert.Equal(t, p2p.ErrNilTopic, err) - - // The same as above, but in reverse (Peer2 sends to Peer1). - err = peer2.SendToConnectedPeer("rocket", []byte("try to launch this rocket"), "Peer1") - assert.Equal(t, p2p.ErrNilTopic, err) - - // The network has logged no processed messages. - assert.Equal(t, 0, network.GetMessageCount()) - - // Create a topic on Peer1. This doesn't help, because Peer2 still can't - // receive messages on topic "rocket". - _ = peer1.CreateTopic("nitrous_oxide", false) - err = peer2.SendToConnectedPeer("rocket", []byte("try to launch this rocket"), "Peer1") - assert.Equal(t, p2p.ErrNilTopic, err) - - // The network has still not logged any processed messages. - assert.Equal(t, 0, network.GetMessageCount()) - - // Finally, create the topic "rocket" on Peer1 and register a - // MessageProcessor. This allows it to receive a message on this topic from Peer2. - _ = peer1.CreateTopic("rocket", false) - _ = peer1.RegisterMessageProcessor("rocket", mock.NewMockMessageProcessor(peer1.ID())) - err = peer2.SendToConnectedPeer("rocket", []byte("try to launch this rocket"), "Peer1") - assert.Nil(t, err) - - // The network has finally logged a processed message. - assert.Equal(t, 1, network.GetMessageCount()) -} diff --git a/p2p/memp2p/memp2p_message.go b/p2p/memp2p/message.go similarity index 58% rename from p2p/memp2p/memp2p_message.go rename to p2p/memp2p/message.go index bb6bd6aeb5e..b0b4205b46b 100644 --- a/p2p/memp2p/memp2p_message.go +++ b/p2p/memp2p/message.go @@ -1,10 +1,14 @@ package memp2p -import "github.com/ElrondNetwork/elrond-go/p2p" +import ( + "encoding/binary" + + "github.com/ElrondNetwork/elrond-go/p2p" +) // Message represents a message to be sent through the in-memory network // simulated by the Network struct. 
-type Message struct { +type message struct { // sending PeerID, converted to []byte from []byte @@ -28,59 +32,58 @@ type Message struct { } // NewMessage constructs a new Message instance from arguments -func NewMessage(topic string, data []byte, peerID p2p.PeerID) (*Message, error) { - var empty []byte - message := Message{ +func newMessage(topic string, data []byte, peerID p2p.PeerID, seqNo uint64) *message { + empty := make([]byte, 0) + seqNoBytes := make([]byte, 8) + binary.BigEndian.PutUint64(seqNoBytes, seqNo) + + return &message{ from: []byte(string(peerID)), data: data, - seqNo: empty, + seqNo: seqNoBytes, topicIds: []string{topic}, signature: empty, key: []byte(string(peerID)), peer: peerID, } - return &message, nil } // From returns the message originator's peer ID -func (message *Message) From() []byte { - return message.from +func (msg *message) From() []byte { + return msg.from } // Data returns the message payload -func (message *Message) Data() []byte { - return message.data +func (msg *message) Data() []byte { + return msg.data } // SeqNo returns the message sequence number -func (message *Message) SeqNo() []byte { - return message.seqNo +func (msg *message) SeqNo() []byte { + return msg.seqNo } // TopicIDs returns the topic on which the message was sent -func (message *Message) TopicIDs() []string { - return message.topicIds +func (msg *message) TopicIDs() []string { + return msg.topicIds } // Signature returns the message signature -func (message *Message) Signature() []byte { - return message.signature +func (msg *message) Signature() []byte { + return msg.signature } // Key returns the message public key (if it can not be recovered from From field) -func (message *Message) Key() []byte { - return message.key +func (msg *message) Key() []byte { + return msg.key } // Peer returns the peer that originated the message -func (message *Message) Peer() p2p.PeerID { - return message.peer +func (msg *message) Peer() p2p.PeerID { + return msg.peer } // IsInterfaceNil returns true if there is no value under the interface -func (message *Message) IsInterfaceNil() bool { - if message == nil { - return true - } - return false +func (msg *message) IsInterfaceNil() bool { + return msg == nil } diff --git a/p2p/memp2p/memp2p.go b/p2p/memp2p/messenger.go similarity index 65% rename from p2p/memp2p/memp2p.go rename to p2p/memp2p/messenger.go index e186fee1ea3..55d3dbe4443 100644 --- a/p2p/memp2p/memp2p.go +++ b/p2p/memp2p/messenger.go @@ -1,13 +1,19 @@ package memp2p import ( + "crypto/rand" + "encoding/base64" "fmt" "sync" + "sync/atomic" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/p2p" ) +const maxQueueSize = 1000 + var log = logger.GetOrCreate("p2p/memp2p") // Messenger is an implementation of the p2p.Messenger interface that @@ -24,11 +30,15 @@ var log = logger.GetOrCreate("p2p/memp2p") // broadcasting a message will be received by all the messengers in the // network. 
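The newMessage constructor above now stamps each message with the messenger's sequence number serialized as 8 big-endian bytes. A tiny round-trip sketch of that encoding:

package example

import "encoding/binary"

// encodeSeqNo writes a uint64 sequence number as 8 big-endian bytes,
// the same representation newMessage stores in msg.seqNo.
func encodeSeqNo(seqNo uint64) []byte {
	buff := make([]byte, 8)
	binary.BigEndian.PutUint64(buff, seqNo)
	return buff
}

// decodeSeqNo recovers the numeric value from that 8-byte form.
func decodeSeqNo(buff []byte) uint64 {
	return binary.BigEndian.Uint64(buff)
}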
type Messenger struct { - Network *Network - P2PID p2p.PeerID - Address string - Topics map[string]p2p.MessageProcessor - TopicsMutex sync.RWMutex + network *Network + p2pID p2p.PeerID + address string + topics map[string]struct{} + topicValidators map[string]p2p.MessageProcessor + topicsMutex *sync.RWMutex + seqNo uint64 + processQueue chan p2p.MessageP2P + numReceived uint64 } // NewMessenger constructs a new Messenger that is connected to the @@ -38,25 +48,29 @@ func NewMessenger(network *Network) (*Messenger, error) { return nil, ErrNilNetwork } - ID := fmt.Sprintf("Peer%d", len(network.PeerIDs())+1) + buff := make([]byte, 32) + _, _ = rand.Reader.Read(buff) + ID := base64.StdEncoding.EncodeToString(buff) Address := fmt.Sprintf("/memp2p/%s", ID) messenger := &Messenger{ - Network: network, - P2PID: p2p.PeerID(ID), - Address: Address, - Topics: make(map[string]p2p.MessageProcessor), - TopicsMutex: sync.RWMutex{}, + network: network, + p2pID: p2p.PeerID(ID), + address: Address, + topics: make(map[string]struct{}), + topicValidators: make(map[string]p2p.MessageProcessor), + topicsMutex: &sync.RWMutex{}, + processQueue: make(chan p2p.MessageP2P, maxQueueSize), } - network.RegisterPeer(messenger) + go messenger.processFromQueue() return messenger, nil } // ID returns the P2P ID of the messenger func (messenger *Messenger) ID() p2p.PeerID { - return messenger.P2PID + return messenger.p2pID } // Peers returns a slice containing the P2P IDs of all the other peers that it @@ -69,7 +83,7 @@ func (messenger *Messenger) Peers() []p2p.PeerID { if !messenger.IsConnectedToNetwork() { return []p2p.PeerID{} } - return messenger.Network.PeerIDs() + return messenger.network.PeerIDs() } // Addresses returns a list of all the physical addresses that this Messenger @@ -78,14 +92,14 @@ func (messenger *Messenger) Peers() []p2p.PeerID { // return is an artificial one, built by the constructor NewMessenger(). func (messenger *Messenger) Addresses() []string { addresses := make([]string, 1) - addresses[0] = messenger.Address + addresses[0] = messenger.address return addresses } // ConnectToPeer usually does nothing, because peers connected to the in-memory // network are already all connected to each other. This function will return // an error if the Messenger is not connected to the network, though. -func (messenger *Messenger) ConnectToPeer(address string) error { +func (messenger *Messenger) ConnectToPeer(_ string) error { if !messenger.IsConnectedToNetwork() { return ErrNotConnectedToNetwork } @@ -96,13 +110,13 @@ func (messenger *Messenger) ConnectToPeer(address string) error { // IsConnectedToNetwork returns true if this messenger is connected to the // in-memory network, false otherwise. func (messenger *Messenger) IsConnectedToNetwork() bool { - return messenger.Network.IsPeerConnected(messenger.ID()) + return messenger.network.IsPeerConnected(messenger.ID()) } // IsConnected returns true if this Messenger is connected to the peer with the // specified ID. It always returns true if the Messenger is connected to the // network and false otherwise, regardless of the provided peer ID. 
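NewMessenger above switches from counter-based "PeerN" identifiers to IDs derived from 32 random bytes, which keeps IDs unique without coordinating with the rest of the network. A minimal sketch of that generation step, using the same crypto/rand plus base64 combination as the hunk above:

package example

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

// newRandomPeerID builds an ID and a /memp2p/ address the way NewMessenger does above:
// 32 random bytes, base64-encoded.
func newRandomPeerID() (id string, address string) {
	buff := make([]byte, 32)
	_, _ = rand.Reader.Read(buff)

	id = base64.StdEncoding.EncodeToString(buff)
	address = fmt.Sprintf("/memp2p/%s", id)

	return id, address
}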
-func (messenger *Messenger) IsConnected(peerID p2p.PeerID) bool { +func (messenger *Messenger) IsConnected(_ p2p.PeerID) bool { return messenger.IsConnectedToNetwork() } @@ -115,7 +129,7 @@ func (messenger *Messenger) ConnectedPeers() []p2p.PeerID { if !messenger.IsConnectedToNetwork() { return []p2p.PeerID{} } - return messenger.Network.PeerIDsExceptOne(messenger.ID()) + return messenger.network.PeerIDsExceptOne(messenger.ID()) } // ConnectedAddresses returns a slice of peer addresses to which this Messenger @@ -125,7 +139,7 @@ func (messenger *Messenger) ConnectedAddresses() []string { if !messenger.IsConnectedToNetwork() { return []string{} } - return messenger.Network.ListAddressesExceptOne(messenger.ID()) + return messenger.network.ListAddressesExceptOne(messenger.ID()) } // PeerAddress creates the address string from a given peer ID. @@ -142,12 +156,10 @@ func (messenger *Messenger) ConnectedPeersOnTopic(topic string) []p2p.PeerID { return filteredPeers } - allPeerIDsExceptThis := messenger.Network.PeerIDsExceptOne(messenger.ID()) - allPeersExceptThis := messenger.Network.PeersExceptOne(messenger.ID()) - for _, peerID := range allPeerIDsExceptThis { - peer := allPeersExceptThis[peerID] + allPeersExceptThis := messenger.network.PeersExceptOne(messenger.ID()) + for _, peer := range allPeersExceptThis { if peer.HasTopic(topic) { - filteredPeers = append(filteredPeers, peerID) + filteredPeers = append(filteredPeers, peer.ID()) } } @@ -167,26 +179,25 @@ func (messenger *Messenger) Bootstrap() error { // CreateTopic adds the topic provided as argument to the list of topics of // interest for this Messenger. It also registers a nil message validator to // handle the messages received on this topic. -func (messenger *Messenger) CreateTopic(name string, createChannelForTopic bool) error { - messenger.TopicsMutex.Lock() +func (messenger *Messenger) CreateTopic(name string, _ bool) error { + messenger.topicsMutex.Lock() + defer messenger.topicsMutex.Unlock() - _, found := messenger.Topics[name] + _, found := messenger.topics[name] if found { - messenger.TopicsMutex.Unlock() return p2p.ErrTopicAlreadyExists } + messenger.topics[name] = struct{}{} - messenger.Topics[name] = nil - messenger.TopicsMutex.Unlock() return nil } // HasTopic returns true if this Messenger has declared interest in the given // topic; returns false otherwise. func (messenger *Messenger) HasTopic(name string) bool { - messenger.TopicsMutex.RLock() - _, found := messenger.Topics[name] - messenger.TopicsMutex.RUnlock() + messenger.topicsMutex.RLock() + _, found := messenger.topics[name] + messenger.topicsMutex.RUnlock() return found } @@ -195,52 +206,54 @@ func (messenger *Messenger) HasTopic(name string) bool { // the given topic and has registered a non-nil validator on that topic. // Returns false otherwise. func (messenger *Messenger) HasTopicValidator(name string) bool { - messenger.TopicsMutex.RLock() - validator, found := messenger.Topics[name] - messenger.TopicsMutex.RUnlock() + messenger.topicsMutex.RLock() + validator := messenger.topicValidators[name] + messenger.topicsMutex.RUnlock() - return found && (validator != nil) + return check.IfNil(validator) } // RegisterMessageProcessor sets the provided message processor to be the // processor of received messages for the given topic. 
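Both the memp2p Messenger here and the netMessenger earlier in this diff now keep "topic exists" and "topic has a validator" in two separate maps behind one mutex, so registration can distinguish a missing topic from an already-registered validator. Below is a compact, self-contained sketch of that bookkeeping with generic names (topicRegistry and processor are illustrative stand-ins).

package example

import (
	"errors"
	"fmt"
	"sync"
)

var (
	errNilTopic            = errors.New("nil topic")
	errNilValidator        = errors.New("nil validator")
	errValidatorAlreadySet = errors.New("topic validator operation not supported")
)

// processor stands in for p2p.MessageProcessor.
type processor interface {
	ProcessReceivedMessage(data []byte) error
}

type topicRegistry struct {
	mut        sync.RWMutex
	topics     map[string]struct{}  // which topics were created
	processors map[string]processor // which topics have a validator attached
}

func newTopicRegistry() *topicRegistry {
	return &topicRegistry{
		topics:     make(map[string]struct{}),
		processors: make(map[string]processor),
	}
}

// register attaches a processor only when the topic exists and has no processor yet,
// mirroring the error cases of RegisterMessageProcessor above.
func (r *topicRegistry) register(topic string, p processor) error {
	if p == nil {
		return errNilValidator
	}

	r.mut.Lock()
	defer r.mut.Unlock()

	if _, found := r.topics[topic]; !found {
		return fmt.Errorf("%w register, topic: %s", errNilTopic, topic)
	}
	if r.processors[topic] != nil {
		return errValidatorAlreadySet
	}

	r.processors[topic] = p
	return nil
}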
func (messenger *Messenger) RegisterMessageProcessor(topic string, handler p2p.MessageProcessor) error { - if handler == nil || handler.IsInterfaceNil() { + if check.IfNil(handler) { return p2p.ErrNilValidator } - messenger.TopicsMutex.Lock() - defer messenger.TopicsMutex.Unlock() - validator, found := messenger.Topics[topic] + messenger.topicsMutex.Lock() + defer messenger.topicsMutex.Unlock() + _, found := messenger.topics[topic] if !found { - return p2p.ErrNilTopic + return fmt.Errorf("%w RegisterMessageProcessor, topic: %s", p2p.ErrNilTopic, topic) } - if validator != nil { + validator := messenger.topicValidators[topic] + if !check.IfNil(validator) { return p2p.ErrTopicValidatorOperationNotSupported } - messenger.Topics[topic] = handler + messenger.topicValidators[topic] = handler return nil } // UnregisterMessageProcessor unsets the message processor for the given topic // (sets it to nil). func (messenger *Messenger) UnregisterMessageProcessor(topic string) error { - messenger.TopicsMutex.Lock() - defer messenger.TopicsMutex.Unlock() - validator, found := messenger.Topics[topic] + messenger.topicsMutex.Lock() + defer messenger.topicsMutex.Unlock() + _, found := messenger.topics[topic] if !found { - return p2p.ErrNilTopic + return fmt.Errorf("%w UnregisterMessageProcessor, topic: %s", p2p.ErrNilTopic, topic) } - if validator == nil { + validator := messenger.topicValidators[topic] + if check.IfNil(validator) { return p2p.ErrTopicValidatorOperationNotSupported } - messenger.Topics[topic] = nil + messenger.topicValidators[topic] = nil return nil } @@ -254,8 +267,8 @@ func (messenger *Messenger) OutgoingChannelLoadBalancer() p2p.ChannelLoadBalance // have their ReceiveMessage() function called synchronously. The call // to parametricBroadcast() is done synchronously as well. This function should // be called as a go-routine. -func (messenger *Messenger) BroadcastOnChannelBlocking(channel string, topic string, buff []byte) error { - return messenger.parametricBroadcast(topic, buff, false) +func (messenger *Messenger) BroadcastOnChannelBlocking(_ string, topic string, buff []byte) error { + return messenger.synchronousBroadcast(topic, buff) } // BroadcastOnChannel sends the message to all peers in the network. It calls @@ -264,8 +277,8 @@ func (messenger *Messenger) BroadcastOnChannelBlocking(channel string, topic str // parametricBroadcast() is done as a go-routine, which means this function is, // in fact, non-blocking, but it is identical with BroadcastOnChannelBlocking() // in all other regards. -func (messenger *Messenger) BroadcastOnChannel(channel string, topic string, buff []byte) { - err := messenger.parametricBroadcast(topic, buff, false) +func (messenger *Messenger) BroadcastOnChannel(_ string, topic string, buff []byte) { + err := messenger.synchronousBroadcast(topic, buff) log.LogIfError(err) } @@ -273,91 +286,84 @@ func (messenger *Messenger) BroadcastOnChannel(channel string, topic string, buf // calls parametricBroadcast() with async=true, which means that peers will // have their ReceiveMessage() function independently called as go-routines. func (messenger *Messenger) Broadcast(topic string, buff []byte) { - err := messenger.parametricBroadcast(topic, buff, true) + err := messenger.synchronousBroadcast(topic, buff) log.LogIfError(err) } -// parametricBroadcast sends a message to all peers in the network, with the -// possibility to choose from asynchronous or synchronous sending. 
-func (messenger *Messenger) parametricBroadcast(topic string, data []byte, async bool) error { +// synchronousBroadcast sends a message to all peers in the network in a synchronous way +func (messenger *Messenger) synchronousBroadcast(topic string, data []byte) error { if !messenger.IsConnectedToNetwork() { return ErrNotConnectedToNetwork } - message, err := NewMessage(topic, data, messenger.ID()) - if err != nil { - return err + seqNo := atomic.AddUint64(&messenger.seqNo, 1) + message := newMessage(topic, data, messenger.ID(), seqNo) + + peers := messenger.network.Peers() + for _, peer := range peers { + peer.receiveMessage(message) } - for _, peer := range messenger.Network.Peers() { - if async { - go func(receivingPeer *Messenger) { - err := receivingPeer.ReceiveMessage(topic, message, true) - log.LogIfError(err) - }(peer) - } else { - err = peer.ReceiveMessage(topic, message, true) + return nil +} + +func (messenger *Messenger) processFromQueue() { + for { + message := <-messenger.processQueue + if check.IfNil(message) { + continue } - if err != nil { - break + + topic := message.TopicIDs()[0] + if topic == "" { + continue } - } - return err + messenger.topicsMutex.Lock() + _, found := messenger.topics[topic] + if !found { + messenger.topicsMutex.Unlock() + continue + } + + // numReceived gets incremented because the message arrived on a registered topic + atomic.AddUint64(&messenger.numReceived, 1) + validator := messenger.topicValidators[topic] + if check.IfNil(validator) { + messenger.topicsMutex.Unlock() + continue + } + messenger.topicsMutex.Unlock() + + _ = validator.ProcessReceivedMessage(message, nil) + } } // SendToConnectedPeer sends a message directly to the peer specified by the ID. func (messenger *Messenger) SendToConnectedPeer(topic string, buff []byte, peerID p2p.PeerID) error { if messenger.IsConnectedToNetwork() { - if peerID == messenger.ID() { - return ErrCannotSendToSelf - } - message, err := NewMessage(topic, buff, messenger.ID()) - if err != nil { - return err - } - receivingPeer, peerFound := messenger.Network.PeersExceptOne(messenger.ID())[peerID] + seqNo := atomic.AddUint64(&messenger.seqNo, 1) + message := newMessage(topic, buff, messenger.ID(), seqNo) + receivingPeer, peerFound := messenger.network.Peers()[peerID] if !peerFound { return ErrReceivingPeerNotConnected } + receivingPeer.receiveMessage(message) - return receivingPeer.ReceiveMessage(topic, message, false) + return nil } return ErrNotConnectedToNetwork } -// ReceiveMessage handles the received message by passing it to the message +// receiveMessage handles the received message by passing it to the message // processor of the corresponding topic, given that this Messenger has // previously registered a message processor for that topic. The Network will // log the message only if the Network.LogMessages flag is set and only if the // Messenger has the requested topic and MessageProcessor. 
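receiveMessage above reduces delivery to a buffered channel send, and processFromQueue drains that channel on a dedicated goroutine, counting only messages that arrive on a registered topic. Below is a condensed, stand-alone sketch of that consumer (receiver and inboundMessage are illustrative stand-ins).

package example

import (
	"sync"
	"sync/atomic"
)

const maxQueueSize = 1000

type inboundMessage struct {
	topic string
	data  []byte // payload; a registered validator would consume this
}

type receiver struct {
	mut          sync.RWMutex
	topics       map[string]struct{}
	processQueue chan inboundMessage
	numReceived  uint64
}

func newReceiver() *receiver {
	r := &receiver{
		topics:       make(map[string]struct{}),
		processQueue: make(chan inboundMessage, maxQueueSize),
	}
	go r.processFromQueue()

	return r
}

// enqueue mirrors receiveMessage: delivery is a buffered channel send
// (it blocks only once the queue is full).
func (r *receiver) enqueue(msg inboundMessage) {
	r.processQueue <- msg
}

// processFromQueue drains the queue and counts only messages on registered topics,
// as the memp2p Messenger does above.
func (r *receiver) processFromQueue() {
	for msg := range r.processQueue {
		r.mut.RLock()
		_, found := r.topics[msg.topic]
		r.mut.RUnlock()
		if !found {
			continue
		}

		atomic.AddUint64(&r.numReceived, 1)
	}
}

// numMessagesReceived mirrors NumMessagesReceived above.
func (r *receiver) numMessagesReceived() uint64 {
	return atomic.LoadUint64(&r.numReceived)
}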
-func (messenger *Messenger) ReceiveMessage(topic string, message p2p.MessageP2P, allowBroadcast bool) error { - messenger.TopicsMutex.Lock() - validator, found := messenger.Topics[topic] - messenger.TopicsMutex.Unlock() - - if !found { - return p2p.ErrNilTopic - } - - if validator == nil { - return p2p.ErrNilValidator - } - - if messenger.Network.LogMessages { - messenger.Network.LogMessage(message) - } - - var handler func(buffToSend []byte) - if allowBroadcast { - handler = func(buffToSend []byte) { - messenger.Broadcast(topic, buffToSend) - } - } - - return validator.ProcessReceivedMessage(message, handler) +func (messenger *Messenger) receiveMessage(message p2p.MessageP2P) { + messenger.processQueue <- message } // IsConnectedToTheNetwork returns true as this implementation is always connected to its network @@ -366,7 +372,7 @@ func (messenger *Messenger) IsConnectedToTheNetwork() bool { } // SetThresholdMinConnectedPeers does nothing as this implementation is always connected to its network -func (messenger *Messenger) SetThresholdMinConnectedPeers(minConnectedPeers int) error { +func (messenger *Messenger) SetThresholdMinConnectedPeers(_ int) error { return nil } @@ -375,16 +381,18 @@ func (messenger *Messenger) ThresholdMinConnectedPeers() int { return 0 } +// NumMessagesReceived returns the number of messages received +func (messenger *Messenger) NumMessagesReceived() uint64 { + return atomic.LoadUint64(&messenger.numReceived) +} + // Close disconnects this Messenger from the network it was connected to. func (messenger *Messenger) Close() error { - messenger.Network.UnregisterPeer(messenger.ID()) + messenger.network.UnregisterPeer(messenger.ID()) return nil } // IsInterfaceNil returns true if there is no value under the interface func (messenger *Messenger) IsInterfaceNil() bool { - if messenger == nil { - return true - } - return false + return messenger == nil } diff --git a/p2p/memp2p/messenger_test.go b/p2p/memp2p/messenger_test.go new file mode 100644 index 00000000000..ae28fb068be --- /dev/null +++ b/p2p/memp2p/messenger_test.go @@ -0,0 +1,186 @@ +package memp2p_test + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/p2p/memp2p" + "github.com/ElrondNetwork/elrond-go/p2p/mock" + "github.com/stretchr/testify/assert" +) + +func TestInitializingNetworkAndPeer(t *testing.T) { + network := memp2p.NewNetwork() + + peer, err := memp2p.NewMessenger(network) + assert.Nil(t, err) + + assert.Equal(t, 1, len(network.Peers())) + + assert.Equal(t, 1, len(peer.Addresses())) + assert.Equal(t, "/memp2p/"+string(peer.ID()), peer.Addresses()[0]) + + err = peer.Close() + assert.Nil(t, err) +} + +func TestRegisteringTopics(t *testing.T) { + network := memp2p.NewNetwork() + + messenger, err := memp2p.NewMessenger(network) + assert.Nil(t, err) + + processor := &mock.MockMessageProcessor{} + + // Cannot register a MessageProcessor to a topic that doesn't exist. + err = messenger.RegisterMessageProcessor("rocket", processor) + assert.True(t, errors.Is(err, p2p.ErrNilTopic)) + + // Create a proper topic. + assert.False(t, messenger.HasTopic("rocket")) + assert.Nil(t, messenger.CreateTopic("rocket", false)) + assert.True(t, messenger.HasTopic("rocket")) + + // The newly created topic has no MessageProcessor attached to it, so we + // attach one now. 
+	assert.Nil(t, messenger.TopicValidator("rocket"))
+	err = messenger.RegisterMessageProcessor("rocket", processor)
+	assert.Nil(t, err)
+	assert.Equal(t, processor, messenger.TopicValidator("rocket"))
+
+	// Cannot unregister a MessageProcessor from a topic that doesn't exist.
+	err = messenger.UnregisterMessageProcessor("albatross")
+	assert.True(t, errors.Is(err, p2p.ErrNilTopic))
+
+	// Cannot unregister a MessageProcessor from a topic that doesn't have a
+	// MessageProcessor, even if the topic itself exists.
+	err = messenger.CreateTopic("nitrous_oxide", false)
+	assert.Nil(t, err)
+	err = messenger.UnregisterMessageProcessor("nitrous_oxide")
+	assert.Equal(t, p2p.ErrTopicValidatorOperationNotSupported, err)
+
+	// Unregister the MessageProcessor from a topic that exists and has a
+	// MessageProcessor.
+	err = messenger.UnregisterMessageProcessor("rocket")
+	assert.Nil(t, err)
+	assert.True(t, messenger.HasTopic("rocket"))
+	assert.Nil(t, messenger.TopicValidator("rocket"))
+
+	// Disallow creating duplicate topics.
+	err = messenger.CreateTopic("more_rockets", false)
+	assert.Nil(t, err)
+	assert.True(t, messenger.HasTopic("more_rockets"))
+	err = messenger.CreateTopic("more_rockets", false)
+	assert.NotNil(t, err)
+}
+
+func TestBroadcastingMessages(t *testing.T) {
+	network := memp2p.NewNetwork()
+
+	numPeers := 4
+	peers := make([]*memp2p.Messenger, numPeers)
+	for i := 0; i < numPeers; i++ {
+		peer, _ := memp2p.NewMessenger(network)
+		_ = peer.CreateTopic("rocket", false)
+		peers[i] = peer
+	}
+
+	// Send a message to everybody.
+	_ = peers[0].BroadcastOnChannelBlocking("rocket", "rocket", []byte("launch the rocket"))
+	time.Sleep(1 * time.Second)
+	testReceivedMessages(t, peers, map[int]uint64{0: 1, 1: 1, 2: 1, 3: 1})
+
+	// Send a message after disconnecting. No new messages should get broadcast.
+	err := peers[0].Close()
+	assert.Nil(t, err)
+	_ = peers[0].BroadcastOnChannelBlocking("rocket", "rocket", []byte("launch the rocket again"))
+	time.Sleep(1 * time.Second)
+	testReceivedMessages(t, peers, map[int]uint64{0: 1, 1: 1, 2: 1, 3: 1})
+
+	peers[2].Broadcast("rocket", []byte("launch another rocket"))
+	time.Sleep(1 * time.Second)
+	testReceivedMessages(t, peers, map[int]uint64{0: 1, 1: 2, 2: 2, 3: 2})
+
+	peers[2].Broadcast("nitrous_oxide", []byte("this message should not get broadcast"))
+	time.Sleep(1 * time.Second)
+	testReceivedMessages(t, peers, map[int]uint64{0: 1, 1: 2, 2: 2, 3: 2})
+}
+
+func testReceivedMessages(t *testing.T, peers []*memp2p.Messenger, receivedNumMap map[int]uint64) {
+	for idx, p := range peers {
+		val, found := receivedNumMap[idx]
+		if !found {
+			assert.Fail(t, fmt.Sprintf("number of messages received was not defined for index %d", idx))
+			return
+		}
+
+		assert.Equal(t, val, p.NumMessagesReceived(), "for peer on index %d", idx)
+	}
+}
+
+func TestConnectivityAndTopics(t *testing.T) {
+	network := memp2p.NewNetwork()
+
+	// Create 4 peers on the network, all listening to the topic "rocket".
+	numPeers := 4
+	peers := make([]*memp2p.Messenger, numPeers)
+	for i := 0; i < numPeers; i++ {
+		peer, _ := memp2p.NewMessenger(network)
+		_ = peer.CreateTopic("rocket", false)
+		peers[i] = peer
+	}
+
+	// Peers 2 and 3 also listen on the topic "carbohydrate"
+	_ = peers[2].CreateTopic("carbohydrate", false)
+	_ = peers[2].RegisterMessageProcessor("carbohydrate", mock.NewMockMessageProcessor(peers[2].ID()))
+	_ = peers[3].CreateTopic("carbohydrate", false)
+	_ = peers[3].RegisterMessageProcessor("carbohydrate", mock.NewMockMessageProcessor(peers[3].ID()))
+
+	// Check which peers Peer0 is connected to, based on the topics they listen to.
+	peer0 := peers[0]
+	assert.Equal(t, numPeers, len(network.PeerIDs()))
+	assert.Equal(t, numPeers-1, len(peer0.ConnectedPeers()))
+	assert.Equal(t, numPeers-1, len(peer0.ConnectedPeersOnTopic("rocket")))
+	assert.Equal(t, 2, len(peer0.ConnectedPeersOnTopic("carbohydrate")))
+}
+
+func TestSendingDirectMessages(t *testing.T) {
+	network := memp2p.NewNetwork()
+
+	peer1, _ := memp2p.NewMessenger(network)
+	peer2, _ := memp2p.NewMessenger(network)
+
+	// Peer1 attempts to send a direct message to Peer2 on topic "rocket", but
+	// Peer2 is not listening to this topic.
+	_ = peer1.SendToConnectedPeer("rocket", []byte("try to launch this rocket"), peer2.ID())
+	time.Sleep(time.Millisecond * 100)
+
+	// The same as above, but in reverse (Peer2 sends to Peer1).
+	_ = peer2.SendToConnectedPeer("rocket", []byte("try to launch this rocket"), peer1.ID())
+	time.Sleep(time.Millisecond * 100)
+
+	// Neither peer got the message
+	assert.Equal(t, uint64(0), peer1.NumMessagesReceived())
+	assert.Equal(t, uint64(0), peer2.NumMessagesReceived())
+
+	// Create a topic on Peer1. This doesn't help, because Peer1 still can't
+	// receive messages on topic "rocket".
+	_ = peer1.CreateTopic("nitrous_oxide", false)
+	_ = peer2.SendToConnectedPeer("rocket", []byte("try to launch this rocket"), peer1.ID())
+	time.Sleep(time.Millisecond * 100)
+
+	// Peer1 still did not get the message
+	assert.Equal(t, uint64(0), peer1.NumMessagesReceived())
+
+	// Finally, create the topic "rocket" on Peer1
+	// This allows it to receive a message on this topic from Peer2.
+	_ = peer1.CreateTopic("rocket", false)
+	_ = peer2.SendToConnectedPeer("rocket", []byte("try to launch this rocket"), peer1.ID())
+	time.Sleep(time.Millisecond * 100)
+
+	// Peer1 got the message
+	assert.Equal(t, uint64(1), peer1.NumMessagesReceived())
+}
diff --git a/p2p/memp2p/memp2p_network.go b/p2p/memp2p/network.go
similarity index 55%
rename from p2p/memp2p/memp2p_network.go
rename to p2p/memp2p/network.go
index 232b96eb047..8cd70403dd4 100644
--- a/p2p/memp2p/memp2p_network.go
+++ b/p2p/memp2p/network.go
@@ -12,79 +12,57 @@ import (
 // peers. The peers are connected to the network if they are in the internal
 // `peers` map; otherwise, they are disconnected.
 type Network struct {
-	mutex           sync.RWMutex
-	messageLogMutex sync.RWMutex
-	peerIDs         []p2p.PeerID
-	peers           map[p2p.PeerID]*Messenger
-	LogMessages     bool
-	Messages        []p2p.MessageP2P
+	mutex sync.RWMutex
+	peers map[p2p.PeerID]*Messenger
 }

 // NewNetwork constructs a new Network instance with an empty
 // internal map of peers.
-func NewNetwork() (*Network, error) { - var peerIDs []p2p.PeerID - var messages []p2p.MessageP2P - +func NewNetwork() *Network { network := Network{ - mutex: sync.RWMutex{}, - messageLogMutex: sync.RWMutex{}, - peerIDs: peerIDs, - peers: make(map[p2p.PeerID]*Messenger), - LogMessages: false, - Messages: messages, + mutex: sync.RWMutex{}, + peers: make(map[p2p.PeerID]*Messenger), } - return &network, nil -} - -// ListAddresses provides the addresses of the known peers. -func (network *Network) ListAddresses() []string { - network.mutex.RLock() - addresses := make([]string, len(network.peerIDs)) - for i, peerID := range network.peerIDs { - addresses[i] = fmt.Sprintf("/memp2p/%s", peerID) - } - network.mutex.RUnlock() - return addresses + return &network } // ListAddressesExceptOne provides the addresses of the known peers, except a specified one. func (network *Network) ListAddressesExceptOne(peerIDToExclude p2p.PeerID) []string { network.mutex.RLock() - resultingLength := len(network.peerIDs) - 1 - if resultingLength <= 0 { - network.mutex.RUnlock() - return []string{} - } + resultingLength := len(network.peers) - 1 addresses := make([]string, resultingLength) - k := 0 - for _, peerID := range network.peerIDs { - if peerID == peerIDToExclude { + idx := 0 + for _, peer := range network.peers { + if peer.ID() == peerIDToExclude { continue } - addresses[k] = fmt.Sprintf("/memp2p/%s", peerID) - k++ + addresses[idx] = fmt.Sprintf("/memp2p/%s", peer.ID()) + idx++ } network.mutex.RUnlock() + return addresses } // Peers provides a copy of its internal map of peers func (network *Network) Peers() map[p2p.PeerID]*Messenger { - network.mutex.RLock() peersCopy := make(map[p2p.PeerID]*Messenger) + + network.mutex.RLock() for peerID, peer := range network.peers { peersCopy[peerID] = peer } network.mutex.RUnlock() + return peersCopy } // PeersExceptOne provides a copy of its internal map of peers, excluding a specific peer. func (network *Network) PeersExceptOne(peerIDToExclude p2p.PeerID) map[p2p.PeerID]*Messenger { - network.mutex.RLock() peersCopy := make(map[p2p.PeerID]*Messenger) + + network.mutex.RLock() for peerID, peer := range network.peers { if peerID == peerIDToExclude { continue @@ -92,35 +70,35 @@ func (network *Network) PeersExceptOne(peerIDToExclude p2p.PeerID) map[p2p.PeerI peersCopy[peerID] = peer } network.mutex.RUnlock() + return peersCopy } // PeerIDs provides a copy of its internal slice of peerIDs func (network *Network) PeerIDs() []p2p.PeerID { network.mutex.RLock() - peerIDsCopy := make([]p2p.PeerID, len(network.peerIDs)) - _ = copy(peerIDsCopy, network.peerIDs) + peerIDsCopy := make([]p2p.PeerID, len(network.peers)) + idx := 0 + for peerID := range network.peers { + peerIDsCopy[idx] = peerID + idx++ + } network.mutex.RUnlock() + return peerIDsCopy } //PeerIDsExceptOne provides a copy of its internal slice of peerIDs, excluding a specific peer. 
func (network *Network) PeerIDsExceptOne(peerIDToExclude p2p.PeerID) []p2p.PeerID { network.mutex.RLock() - resultingLength := len(network.peerIDs) - 1 - if resultingLength <= 0 { - network.mutex.RUnlock() - return []p2p.PeerID{} - } - - peerIDsCopy := make([]p2p.PeerID, resultingLength) - k := 0 - for _, peerID := range network.peerIDs { + peerIDsCopy := make([]p2p.PeerID, len(network.peers)-1) + idx := 0 + for peerID := range network.peers { if peerID == peerIDToExclude { continue } - peerIDsCopy[k] = peerID - k++ + peerIDsCopy[idx] = peerID + idx++ } network.mutex.RUnlock() return peerIDsCopy @@ -130,7 +108,6 @@ func (network *Network) PeerIDsExceptOne(peerIDToExclude p2p.PeerID) []p2p.PeerI // slice. func (network *Network) RegisterPeer(messenger *Messenger) { network.mutex.Lock() - network.peerIDs = append(network.peerIDs, messenger.ID()) network.peers[messenger.ID()] = messenger network.mutex.Unlock() } @@ -139,35 +116,10 @@ func (network *Network) RegisterPeer(messenger *Messenger) { // the peerIDs slice. func (network *Network) UnregisterPeer(peerID p2p.PeerID) { network.mutex.Lock() - // Delete from the Peers map. delete(network.peers, peerID) - // Remove from the peerIDs slice, maintaining the order of the slice. - index := -1 - for i, id := range network.peerIDs { - if id == peerID { - index = i - } - } - network.peerIDs = append(network.peerIDs[0:index], network.peerIDs[index+1:]...) network.mutex.Unlock() } -// LogMessage adds a message to its internal log of messages. -func (network *Network) LogMessage(message p2p.MessageP2P) { - network.messageLogMutex.Lock() - network.Messages = append(network.Messages, message) - network.messageLogMutex.Unlock() -} - -// GetMessageCount returns the number of messages logged internally by the -// Network. -func (network *Network) GetMessageCount() int { - network.messageLogMutex.RLock() - count := len(network.Messages) - network.messageLogMutex.RUnlock() - return count -} - // IsPeerConnected returns true if the peer represented by the provided ID is // found in the inner `peers` map of the Network instance, which // determines whether it is connected to the network or not. 
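For orientation, here is a minimal usage sketch of how the reworked memp2p API fits together once this change set is applied. It is assembled only from calls exercised in messenger_test.go above; mock.NewMockMessageProcessor is the test double from p2p/mock, and a real caller would supply its own p2p.MessageProcessor. This sketch is illustrative and not part of the change itself.

package main

import (
	"errors"
	"time"

	"github.com/ElrondNetwork/elrond-go/p2p"
	"github.com/ElrondNetwork/elrond-go/p2p/memp2p"
	"github.com/ElrondNetwork/elrond-go/p2p/mock"
)

func main() {
	// NewNetwork no longer returns an error in this change set.
	network := memp2p.NewNetwork()

	// Messengers register themselves on the network when constructed.
	sender, _ := memp2p.NewMessenger(network)
	receiver, _ := memp2p.NewMessenger(network)

	// A processor can only be attached to a topic that was created first; the
	// missing-topic error is now wrapped with %w, so errors.Is recognizes it.
	err := receiver.RegisterMessageProcessor("rocket", mock.NewMockMessageProcessor(receiver.ID()))
	_ = errors.Is(err, p2p.ErrNilTopic) // true

	_ = receiver.CreateTopic("rocket", false)
	_ = receiver.RegisterMessageProcessor("rocket", mock.NewMockMessageProcessor(receiver.ID()))

	// Broadcast hands the message to every connected peer, which enqueues it on
	// its processQueue; delivery is asynchronous, so wait briefly before asserting.
	sender.Broadcast("rocket", []byte("payload"))
	time.Sleep(100 * time.Millisecond)

	// numReceived is only incremented for topics the receiving peer has created.
	_ = receiver.NumMessagesReceived() // 1
	_ = sender.NumMessagesReceived()   // 0, the sender never created the topic

	_ = sender.Close()
	_ = receiver.Close()
}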
diff --git a/process/block/argProcessor.go b/process/block/argProcessor.go index 6dde2f0b05c..d1f1a75248b 100644 --- a/process/block/argProcessor.go +++ b/process/block/argProcessor.go @@ -3,7 +3,6 @@ package block import ( "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/core/serviceContainer" - "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -26,22 +25,25 @@ type ArgBaseProcessor struct { NodesCoordinator sharding.NodesCoordinator SpecialAddressHandler process.SpecialAddressHandler Uint64Converter typeConverters.Uint64ByteSliceConverter - StartHeaders map[uint32]data.HeaderHandler RequestHandler process.RequestHandler Core serviceContainer.Core BlockChainHook process.BlockChainHookHandler TxCoordinator process.TransactionCoordinator ValidatorStatisticsProcessor process.ValidatorStatisticsProcessor + EpochStartTrigger process.EpochStartTriggerHandler + HeaderValidator process.HeaderConstructionValidator Rounder consensus.Rounder BootStorer process.BootStorer + BlockTracker process.BlockTracker } // ArgShardProcessor holds all dependencies required by the process data factory in order to create // new instances of shard processor type ArgShardProcessor struct { ArgBaseProcessor - DataPool dataRetriever.PoolsHolder - TxsPoolsCleaner process.PoolsCleaner + DataPool dataRetriever.PoolsHolder + TxsPoolsCleaner process.PoolsCleaner + StateCheckpointModulus uint } // ArgMetaProcessor holds all dependencies required by the process data factory in order to create @@ -49,6 +51,7 @@ type ArgShardProcessor struct { type ArgMetaProcessor struct { ArgBaseProcessor DataPool dataRetriever.MetaPoolsHolder + PendingMiniBlocks process.PendingMiniBlocksHandler SCDataGetter external.SCQueryService PeerChangesHandler process.PeerChangesHandler SCToProtocol process.SmartContractToProtocolHandler diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index b6b497baaeb..2c5f673ebec 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -9,7 +9,6 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" - "github.com/ElrondNetwork/elrond-go/core/sliceUtil" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" @@ -22,7 +21,6 @@ import ( "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" ) var log = logger.GetOrCreate("process/block") @@ -50,38 +48,30 @@ type hdrForBlock struct { hdrHashAndInfo map[string]*hdrInfo } -type mapShardHeaders map[uint32][]data.HeaderHandler -type mapShardHeader map[uint32]data.HeaderHandler - type baseProcessor struct { shardCoordinator sharding.Coordinator nodesCoordinator sharding.NodesCoordinator specialAddressHandler process.SpecialAddressHandler accounts state.AccountsAdapter forkDetector process.ForkDetector + validatorStatisticsProcessor process.ValidatorStatisticsProcessor hasher hashing.Hasher marshalizer marshal.Marshalizer store dataRetriever.StorageService uint64Converter typeConverters.Uint64ByteSliceConverter blockSizeThrottler process.BlockSizeThrottler + epochStartTrigger process.EpochStartTriggerHandler + headerValidator 
process.HeaderConstructionValidator blockChainHook process.BlockChainHookHandler txCoordinator process.TransactionCoordinator - validatorStatisticsProcessor process.ValidatorStatisticsProcessor rounder consensus.Rounder bootStorer process.BootStorer requestBlockBodyHandler process.RequestBlockBodyHandler + requestHandler process.RequestHandler + blockTracker process.BlockTracker hdrsForCurrBlock hdrForBlock - mutNotarizedHdrs sync.RWMutex - notarizedHdrs mapShardHeaders - - mutLastHdrs sync.RWMutex - lastHdrs mapShardHeader - - onRequestHeaderHandlerByNonce func(shardId uint32, nonce uint64) - onRequestHeaderHandler func(shardId uint32, hash []byte) - appStatusHandler core.AppStatusHandler } @@ -113,25 +103,6 @@ func (bp *baseProcessor) SetAppStatusHandler(ash core.AppStatusHandler) error { return nil } -// AddLastNotarizedHdr adds the last notarized header -func (bp *baseProcessor) AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) { - bp.mutNotarizedHdrs.Lock() - bp.notarizedHdrs[shardId] = append(bp.notarizedHdrs[shardId], processedHdr) - bp.mutNotarizedHdrs.Unlock() -} - -// RestoreLastNotarizedHrdsToGenesis will restore notarized header slice to genesis -func (bp *baseProcessor) RestoreLastNotarizedHrdsToGenesis() { - bp.mutNotarizedHdrs.Lock() - for shardId := range bp.notarizedHdrs { - notarizedHdrsCount := len(bp.notarizedHdrs[shardId]) - if notarizedHdrsCount > 1 { - bp.notarizedHdrs[shardId] = bp.notarizedHdrs[shardId][:1] - } - } - bp.mutNotarizedHdrs.Unlock() -} - // RevertAccountState reverts the account state for cleanup failed process func (bp *baseProcessor) RevertAccountState() { err := bp.accounts.RevertToSnapshot(0) @@ -246,6 +217,11 @@ func (bp *baseProcessor) checkBlockValidity( // TODO: add bodyHandler verification here } + // verification of epoch + if headerHandler.GetEpoch() < currentBlockHeader.GetEpoch() { + return process.ErrEpochDoesNotMatch + } + // TODO: add signature validation as well, with randomness source and all return nil } @@ -271,245 +247,16 @@ func (bp *baseProcessor) getRootHash() []byte { return rootHash } -func (bp *baseProcessor) isHdrConstructionValid(currHdr, prevHdr data.HeaderHandler) error { - if prevHdr == nil || prevHdr.IsInterfaceNil() { - return process.ErrNilBlockHeader - } - if currHdr == nil || currHdr.IsInterfaceNil() { - return process.ErrNilBlockHeader - } - - // special case with genesis nonce - 0 - if currHdr.GetNonce() == 0 { - if prevHdr.GetNonce() != 0 { - return process.ErrWrongNonceInBlock - } - // block with nonce 0 was already saved - if prevHdr.GetRootHash() != nil { - return process.ErrRootStateDoesNotMatch - } - return nil - } - - //TODO: add verification if rand seed was correctly computed add other verification - //TODO: check here if the 2 header blocks were correctly signed and the consensus group was correctly elected - if prevHdr.GetRound() >= currHdr.GetRound() { - log.Trace("round does not match", - "shard", currHdr.GetShardID(), - "local block round", prevHdr.GetRound(), - "received round", currHdr.GetRound()) - return process.ErrLowerRoundInBlock - } - - if currHdr.GetNonce() != prevHdr.GetNonce()+1 { - log.Trace("nonce does not match", - "shard", currHdr.GetShardID(), - "local block nonce", prevHdr.GetNonce(), - "received nonce", currHdr.GetNonce()) - return process.ErrWrongNonceInBlock - } - - prevHeaderHash, err := core.CalculateHash(bp.marshalizer, bp.hasher, prevHdr) - if err != nil { - return err - } - - if !bytes.Equal(currHdr.GetPrevHash(), prevHeaderHash) { - log.Trace("block hash 
does not match", - "shard", currHdr.GetShardID(), - "local prev hash", prevHeaderHash, - "received block with prev hash", currHdr.GetPrevHash(), - ) - return process.ErrBlockHashDoesNotMatch - } - - if !bytes.Equal(currHdr.GetPrevRandSeed(), prevHdr.GetRandSeed()) { - log.Trace("random seed does not match", - "shard", currHdr.GetShardID(), - "local rand seed", prevHdr.GetRandSeed(), - "received block with rand seed", currHdr.GetPrevRandSeed(), - ) - return process.ErrRandSeedDoesNotMatch - } - - return nil -} - -func (bp *baseProcessor) checkHeaderTypeCorrect(shardId uint32, hdr data.HeaderHandler) error { - if shardId >= bp.shardCoordinator.NumberOfShards() && shardId != sharding.MetachainShardId { - return process.ErrShardIdMissmatch - } - - if shardId < bp.shardCoordinator.NumberOfShards() { - _, ok := hdr.(*block.Header) - if !ok { - return process.ErrWrongTypeAssertion - } - } - - if shardId == sharding.MetachainShardId { - _, ok := hdr.(*block.MetaBlock) - if !ok { - return process.ErrWrongTypeAssertion - } - } - - return nil -} - -func (bp *baseProcessor) removeNotarizedHdrsBehindPreviousFinal(hdrsToPreservedBehindFinal uint32) { - bp.mutNotarizedHdrs.Lock() - for shardId := range bp.notarizedHdrs { - notarizedHdrsCount := uint32(len(bp.notarizedHdrs[shardId])) - if notarizedHdrsCount > hdrsToPreservedBehindFinal { - finalIndex := notarizedHdrsCount - 1 - hdrsToPreservedBehindFinal - bp.notarizedHdrs[shardId] = bp.notarizedHdrs[shardId][finalIndex:] - } - } - bp.mutNotarizedHdrs.Unlock() -} - -func (bp *baseProcessor) removeLastNotarized() { - bp.mutNotarizedHdrs.Lock() - for shardId := range bp.notarizedHdrs { - notarizedHdrsCount := len(bp.notarizedHdrs[shardId]) - if notarizedHdrsCount > 1 { - bp.notarizedHdrs[shardId] = bp.notarizedHdrs[shardId][:notarizedHdrsCount-1] - } - } - bp.mutNotarizedHdrs.Unlock() -} - -func (bp *baseProcessor) lastNotarizedHdrForShard(shardId uint32) data.HeaderHandler { - notarizedHdrsCount := len(bp.notarizedHdrs[shardId]) - if notarizedHdrsCount > 0 { - return bp.notarizedHdrs[shardId][notarizedHdrsCount-1] - } - - return nil -} - -func (bp *baseProcessor) lastHdrForShard(shardId uint32) data.HeaderHandler { - bp.mutLastHdrs.RLock() - defer bp.mutLastHdrs.RUnlock() - - return bp.lastHdrs[shardId] -} - -func (bp *baseProcessor) setLastHdrForShard(shardId uint32, header data.HeaderHandler) { - if check.IfNil(header) { - return - } - - bp.mutLastHdrs.Lock() - defer bp.mutLastHdrs.Unlock() - - lastHeader, ok := bp.lastHdrs[shardId] - if ok && lastHeader.GetRound() > header.GetRound() { - return - } - - bp.lastHdrs[shardId] = header -} - -func (bp *baseProcessor) saveLastNotarizedHeader(shardId uint32, processedHdrs []data.HeaderHandler) error { - bp.mutNotarizedHdrs.Lock() - defer bp.mutNotarizedHdrs.Unlock() - - if bp.notarizedHdrs == nil { - return process.ErrNotarizedHdrsSliceIsNil - } - - err := bp.checkHeaderTypeCorrect(shardId, bp.lastNotarizedHdrForShard(shardId)) - if err != nil { - return err - } - - sort.Slice(processedHdrs, func(i, j int) bool { - return processedHdrs[i].GetNonce() < processedHdrs[j].GetNonce() - }) - - tmpLastNotarizedHdrForShard := bp.lastNotarizedHdrForShard(shardId) - - for i := 0; i < len(processedHdrs); i++ { - err = bp.checkHeaderTypeCorrect(shardId, processedHdrs[i]) - if err != nil { - return err - } - - err = bp.isHdrConstructionValid(processedHdrs[i], tmpLastNotarizedHdrForShard) - if err != nil { - return err - } - - tmpLastNotarizedHdrForShard = processedHdrs[i] - } - - bp.notarizedHdrs[shardId] = 
append(bp.notarizedHdrs[shardId], tmpLastNotarizedHdrForShard) - DisplayLastNotarized(bp.marshalizer, bp.hasher, tmpLastNotarizedHdrForShard, shardId) - - return nil -} - -func (bp *baseProcessor) getLastNotarizedHdr(shardId uint32) (data.HeaderHandler, error) { - bp.mutNotarizedHdrs.RLock() - defer bp.mutNotarizedHdrs.RUnlock() - - if bp.notarizedHdrs == nil { - return nil, process.ErrNotarizedHdrsSliceIsNil - } - - hdr := bp.lastNotarizedHdrForShard(shardId) - - err := bp.checkHeaderTypeCorrect(shardId, hdr) - if err != nil { - return nil, err - } - - return hdr, nil -} - -// SetLastNotarizedHeadersSlice sets the headers blocks in notarizedHdrs for every shard -// This is done when starting a new epoch so metachain can use it when validating next shard header blocks -// and shard can validate the next meta header -func (bp *baseProcessor) setLastNotarizedHeadersSlice(startHeaders map[uint32]data.HeaderHandler) error { - //TODO: protect this to be called only once at genesis time - //TODO: do this on constructor as it is a must to for blockprocessor to work - bp.mutNotarizedHdrs.Lock() - defer bp.mutNotarizedHdrs.Unlock() - - if startHeaders == nil { - return process.ErrNotarizedHdrsSliceIsNil - } - - bp.notarizedHdrs = make(mapShardHeaders, bp.shardCoordinator.NumberOfShards()) - for i := uint32(0); i < bp.shardCoordinator.NumberOfShards(); i++ { - hdr, ok := startHeaders[i].(*block.Header) - if !ok { - return process.ErrWrongTypeAssertion - } - bp.notarizedHdrs[i] = append(bp.notarizedHdrs[i], hdr) - } - - hdr, ok := startHeaders[sharding.MetachainShardId].(*block.MetaBlock) - if !ok { - return process.ErrWrongTypeAssertion - } - bp.notarizedHdrs[sharding.MetachainShardId] = append(bp.notarizedHdrs[sharding.MetachainShardId], hdr) - - return nil -} - func (bp *baseProcessor) requestHeadersIfMissing( sortedHdrs []data.HeaderHandler, shardId uint32, maxRound uint64, - cacher storage.Cacher, + cacherMaxSize int, ) error { - allowedSize := uint64(float64(cacher.MaxSize()) * process.MaxOccupancyPercentageAllowed) + allowedSize := uint64(float64(cacherMaxSize) * process.MaxOccupancyPercentageAllowed) - prevHdr, err := bp.getLastNotarizedHdr(shardId) + prevHdr, _, err := bp.blockTracker.GetLastCrossNotarizedHeader(shardId) if err != nil { return err } @@ -541,11 +288,6 @@ func (bp *baseProcessor) requestHeadersIfMissing( requested := 0 for _, nonce := range missingNonces { - // do the request here - if bp.onRequestHeaderHandlerByNonce == nil { - return process.ErrNilRequestHeaderHandlerByNonce - } - isHeaderOutOfRange := nonce > lastNotarizedHdrNonce+allowedSize if isHeaderOutOfRange { break @@ -556,7 +298,7 @@ func (bp *baseProcessor) requestHeadersIfMissing( } requested++ - go bp.onRequestHeaderHandlerByNonce(shardId, nonce) + go bp.requestHeaderByShardAndNonce(shardId, nonce) } return nil @@ -564,6 +306,10 @@ func (bp *baseProcessor) requestHeadersIfMissing( func displayHeader(headerHandler data.HeaderHandler) []*display.LineData { return []*display.LineData{ + display.NewLineData(false, []string{ + "", + "ChainID", + display.DisplayByteSlice(headerHandler.GetChainID())}), display.NewLineData(false, []string{ "", "Epoch", @@ -608,10 +354,14 @@ func displayHeader(headerHandler data.HeaderHandler) []*display.LineData { "", "Root hash", display.DisplayByteSlice(headerHandler.GetRootHash())}), - display.NewLineData(true, []string{ + display.NewLineData(false, []string{ "", "Validator stats root hash", display.DisplayByteSlice(headerHandler.GetValidatorStatsRootHash())}), + 
display.NewLineData(true, []string{ + "", + "Receipts hash", + display.DisplayByteSlice(headerHandler.GetReceiptsHash())}), } } @@ -648,18 +398,27 @@ func checkProcessorNilParameters(arguments ArgBaseProcessor) error { if check.IfNil(arguments.RequestHandler) { return process.ErrNilRequestHandler } + if check.IfNil(arguments.EpochStartTrigger) { + return process.ErrNilEpochStartTrigger + } if check.IfNil(arguments.Rounder) { return process.ErrNilRounder } if check.IfNil(arguments.BootStorer) { return process.ErrNilStorage } - if arguments.BlockChainHook == nil || arguments.BlockChainHook.IsInterfaceNil() { + if check.IfNil(arguments.BlockChainHook) { return process.ErrNilBlockChainHook } - if arguments.TxCoordinator == nil || arguments.TxCoordinator.IsInterfaceNil() { + if check.IfNil(arguments.TxCoordinator) { return process.ErrNilTransactionCoordinator } + if check.IfNil(arguments.HeaderValidator) { + return process.ErrNilHeaderValidator + } + if check.IfNil(arguments.BlockTracker) { + return process.ErrNilBlockTracker + } return nil } @@ -812,14 +571,17 @@ func (bp *baseProcessor) checkHeaderBodyCorrelation(miniBlockHeaders []block.Min return nil } -func (bp *baseProcessor) isHeaderOutOfRange(header data.HeaderHandler, cacher storage.Cacher) bool { - lastNotarizedHdr, err := bp.getLastNotarizedHdr(header.GetShardID()) +func (bp *baseProcessor) isHeaderOutOfRange(header data.HeaderHandler, cacherMaxSize int) bool { + lastCrossNotarizedHeader, _, err := bp.blockTracker.GetLastCrossNotarizedHeader(header.GetShardID()) if err != nil { + log.Debug("isHeaderOutOfRange", + "shard", header.GetShardID(), + "error", err.Error()) return false } - allowedSize := uint64(float64(cacher.MaxSize()) * process.MaxOccupancyPercentageAllowed) - isHeaderOutOfRange := header.GetNonce() > lastNotarizedHdr.GetNonce()+allowedSize + allowedSize := uint64(float64(cacherMaxSize) * process.MaxOccupancyPercentageAllowed) + isHeaderOutOfRange := header.GetNonce() > lastCrossNotarizedHeader.GetNonce()+allowedSize return isHeaderOutOfRange } @@ -830,8 +592,6 @@ func (bp *baseProcessor) isHeaderOutOfRange(header data.HeaderHandler, cacher st func (bp *baseProcessor) requestMissingFinalityAttestingHeaders( shardId uint32, finality uint32, - getHeaderFromPoolWithNonce func(uint64, uint32) (data.HeaderHandler, []byte, error), - cacher storage.Cacher, ) uint32 { requestedHeaders := uint32(0) missingFinalityAttestingHeaders := uint32(0) @@ -843,12 +603,12 @@ func (bp *baseProcessor) requestMissingFinalityAttestingHeaders( lastFinalityAttestingHeader := highestHdrNonce + uint64(finality) for i := highestHdrNonce + 1; i <= lastFinalityAttestingHeader; i++ { - headers, headersHashes := bp.getHeadersFromPools(getHeaderFromPoolWithNonce, cacher, shardId, i) + headers, headersHashes := bp.blockTracker.GetTrackedHeadersWithNonce(shardId, i) if len(headers) == 0 { missingFinalityAttestingHeaders++ requestedHeaders++ - go bp.onRequestHeaderHandlerByNonce(shardId, i) + go bp.requestHeaderByShardAndNonce(shardId, i) continue } @@ -866,90 +626,100 @@ func (bp *baseProcessor) requestMissingFinalityAttestingHeaders( return missingFinalityAttestingHeaders } -func (bp *baseProcessor) isShardStuck(shardId uint32) bool { - header := bp.lastHdrForShard(shardId) - if check.IfNil(header) { - return false +func (bp *baseProcessor) requestHeaderByShardAndNonce(targetShardID uint32, nonce uint64) { + if targetShardID == sharding.MetachainShardId { + bp.requestHandler.RequestMetaHeaderByNonce(nonce) + } else { + 
bp.requestHandler.RequestShardHeaderByNonce(targetShardID, nonce) } - - isShardStuck := bp.rounder.Index()-int64(header.GetRound()) > process.MaxRoundsWithoutCommittedBlock - return isShardStuck } func (bp *baseProcessor) cleanupPools( - headersNoncesPool dataRetriever.Uint64SyncMapCacher, - headersPool storage.Cacher, - notarizedHeadersPool storage.Cacher, + headerHandler data.HeaderHandler, + headersPool dataRetriever.HeadersPool, ) { + noncesToFinal := bp.getNoncesToFinal(headerHandler) + bp.removeHeadersBehindNonceFromPools( true, headersPool, - headersNoncesPool, bp.shardCoordinator.SelfId(), bp.forkDetector.GetHighestFinalBlockNonce()) - for shardId := range bp.notarizedHdrs { - lastNotarizedHdr := bp.lastNotarizedHdrForShard(shardId) - if check.IfNil(lastNotarizedHdr) { + for shardID := uint32(0); shardID < bp.shardCoordinator.NumberOfShards(); shardID++ { + if bp.shardCoordinator.SelfId() == shardID { continue } - bp.removeHeadersBehindNonceFromPools( - false, - notarizedHeadersPool, - headersNoncesPool, - shardId, - lastNotarizedHdr.GetNonce()) + bp.cleanupPoolsForShard(shardID, headersPool, noncesToFinal) + } + + if bp.shardCoordinator.SelfId() != sharding.MetachainShardId { + bp.cleanupPoolsForShard(sharding.MetachainShardId, headersPool, noncesToFinal) } +} - return +func (bp *baseProcessor) cleanupPoolsForShard( + shardID uint32, + headersPool dataRetriever.HeadersPool, + noncesToFinal uint64, +) { + crossNotarizedHeader, _, err := bp.blockTracker.GetCrossNotarizedHeader(shardID, noncesToFinal) + if err != nil { + log.Trace("cleanupPoolsForShard", + "shard", shardID, + "nonces to final", noncesToFinal, + "error", err.Error()) + return + } + + bp.removeHeadersBehindNonceFromPools( + false, + headersPool, + shardID, + crossNotarizedHeader.GetNonce(), + ) } func (bp *baseProcessor) removeHeadersBehindNonceFromPools( shouldRemoveBlockBody bool, - cacher storage.Cacher, - uint64SyncMapCacher dataRetriever.Uint64SyncMapCacher, + headersPool dataRetriever.HeadersPool, shardId uint32, nonce uint64, ) { - if nonce <= 1 { return } - if check.IfNil(cacher) { + if check.IfNil(headersPool) { return } - for _, key := range cacher.Keys() { - val, _ := cacher.Peek(key) - if val == nil { - continue - } - - hdr, ok := val.(data.HeaderHandler) - if !ok { - continue - } - - if hdr.GetShardID() != shardId || hdr.GetNonce() >= nonce { + nonces := headersPool.Nonces(shardId) + for _, nonceFromCache := range nonces { + if nonceFromCache >= nonce { continue } if shouldRemoveBlockBody { - errNotCritical := bp.removeBlockBodyOfHeader(hdr) - if errNotCritical != nil { - log.Debug("RemoveBlockDataFromPool", "error", errNotCritical.Error()) - } + bp.removeBlocksBody(nonceFromCache, shardId, headersPool) } - cacher.Remove(key) + headersPool.RemoveHeaderByNonceAndShardId(nonceFromCache, shardId) + } +} - if check.IfNil(uint64SyncMapCacher) { - continue - } +func (bp *baseProcessor) removeBlocksBody(nonce uint64, shardId uint32, headersPool dataRetriever.HeadersPool) { + headers, _, err := headersPool.GetHeadersByNonceAndShardId(nonce, shardId) + if err != nil { + return + } - uint64SyncMapCacher.Remove(hdr.GetNonce(), hdr.GetShardID()) + for _, header := range headers { + errNotCritical := bp.removeBlockBodyOfHeader(header) + if errNotCritical != nil { + log.Debug("RemoveBlockDataFromPool", "error", errNotCritical.Error()) + } } } @@ -972,150 +742,117 @@ func (bp *baseProcessor) removeBlockBodyOfHeader(headerHandler data.HeaderHandle return nil } -func (bp *baseProcessor) removeHeaderFromPools( - header 
data.HeaderHandler, - cacher storage.Cacher, - uint64SyncMapCacher dataRetriever.Uint64SyncMapCacher, -) { - - if check.IfNil(header) { - return - } +func (bp *baseProcessor) cleanupBlockTrackerPools(headerHandler data.HeaderHandler) { + noncesToFinal := bp.getNoncesToFinal(headerHandler) - headerHash, err := core.CalculateHash(bp.marshalizer, bp.hasher, header) - if err != nil { - return + for shardID := uint32(0); shardID < bp.shardCoordinator.NumberOfShards(); shardID++ { + bp.cleanupBlockTrackerPoolsForShard(shardID, noncesToFinal) } - if !check.IfNil(cacher) { - cacher.Remove(headerHash) - } - - if !check.IfNil(uint64SyncMapCacher) { - syncMap, ok := uint64SyncMapCacher.Get(header.GetNonce()) - if !ok { - return - } - - hash, ok := syncMap.Load(header.GetShardID()) - if hash == nil || !ok { - return - } - - if bytes.Equal(headerHash, hash) { - uint64SyncMapCacher.Remove(header.GetNonce(), header.GetShardID()) - } - } + bp.cleanupBlockTrackerPoolsForShard(sharding.MetachainShardId, noncesToFinal) } -func (bp *baseProcessor) getHeadersFromPools( - getHeaderFromPoolWithNonce func(uint64, uint32) (data.HeaderHandler, []byte, error), - cacher storage.Cacher, - shardId uint32, - nonce uint64, -) ([]data.HeaderHandler, [][]byte) { - - keys := cacher.Keys() - headers := make([]data.HeaderHandler, 0, len(keys)+1) - headersHashes := make([][]byte, 0, len(keys)+1) - - //TODO: This for could be deleted when the implementation of the new cache will be done - for _, headerHash := range keys { - val, _ := cacher.Peek(headerHash) - if val == nil { - continue - } - - header, ok := val.(data.HeaderHandler) - if !ok { - continue - } +func (bp *baseProcessor) cleanupBlockTrackerPoolsForShard(shardID uint32, noncesToFinal uint64) { + selfNotarizedNonce := bp.forkDetector.GetHighestFinalBlockNonce() + crossNotarizedNonce := uint64(0) - if header.GetShardID() == shardId && header.GetNonce() == nonce { - headers = append(headers, header) - headersHashes = append(headersHashes, headerHash) + if shardID != bp.shardCoordinator.SelfId() { + crossNotarizedHeader, _, err := bp.blockTracker.GetCrossNotarizedHeader(shardID, noncesToFinal) + if err != nil { + log.Trace("cleanupBlockTrackerPoolsForShard", + "shard", shardID, + "nonces to final", noncesToFinal, + "error", err.Error()) + return } - } - header, headerHash, err := getHeaderFromPoolWithNonce(nonce, shardId) - if err != nil { - return headers, headersHashes + crossNotarizedNonce = crossNotarizedHeader.GetNonce() } - headers = append(headers, header) - headersHashes = append(headersHashes, headerHash) - - return headers, sliceUtil.TrimSliceSliceByte(headersHashes) + bp.blockTracker.CleanupHeadersBehindNonce( + shardID, + selfNotarizedNonce, + crossNotarizedNonce, + ) } func (bp *baseProcessor) prepareDataForBootStorer( headerInfo bootstrapStorage.BootstrapHeaderInfo, round uint64, - lastFinalHdrs []data.HeaderHandler, - lastFinalHashes [][]byte, + selfNotarizedHeaders []data.HeaderHandler, + selfNotarizedHeadersHashes [][]byte, + highestFinalBlockNonce uint64, processedMiniBlocks []bootstrapStorage.MiniBlocksInMeta, ) { - lastFinals := make([]bootstrapStorage.BootstrapHeaderInfo, 0, len(lastFinalHdrs)) + lastSelfNotarizedHeaders := make([]bootstrapStorage.BootstrapHeaderInfo, 0, len(selfNotarizedHeaders)) //TODO add end of epoch stuff - lastNotarizedHdrs := bp.getLastNotarizedHdrs() - highestFinalNonce := bp.forkDetector.GetHighestFinalBlockNonce() + lastCrossNotarizedHeaders := bp.getLastCrossNotarizedHeaders() - for i := range lastFinalHdrs { + for i := 
range selfNotarizedHeaders { headerInfo := bootstrapStorage.BootstrapHeaderInfo{ - ShardId: lastFinalHdrs[i].GetShardID(), - Nonce: lastFinalHdrs[i].GetNonce(), - Hash: lastFinalHashes[i], + ShardId: selfNotarizedHeaders[i].GetShardID(), + Nonce: selfNotarizedHeaders[i].GetNonce(), + Hash: selfNotarizedHeadersHashes[i], } - lastFinals = append(lastFinals, headerInfo) + lastSelfNotarizedHeaders = append(lastSelfNotarizedHeaders, headerInfo) } bootData := bootstrapStorage.BootstrapData{ - LastHeader: headerInfo, - LastNotarizedHeaders: lastNotarizedHdrs, - LastFinals: lastFinals, - HighestFinalNonce: highestFinalNonce, - ProcessedMiniBlocks: processedMiniBlocks, + LastHeader: headerInfo, + LastCrossNotarizedHeaders: lastCrossNotarizedHeaders, + LastSelfNotarizedHeaders: lastSelfNotarizedHeaders, + HighestFinalBlockNonce: highestFinalBlockNonce, + ProcessedMiniBlocks: processedMiniBlocks, } - go func() { - err := bp.bootStorer.Put(int64(round), bootData) - if err != nil { - log.Warn("cannot save boot data in storage", - "error", err.Error()) - } - }() + err := bp.bootStorer.Put(int64(round), bootData) + if err != nil { + log.Warn("cannot save boot data in storage", + "error", err.Error()) + } } -func (bp *baseProcessor) getLastNotarizedHdrs() []bootstrapStorage.BootstrapHeaderInfo { - lastNotarizedHdrs := make([]bootstrapStorage.BootstrapHeaderInfo, 0, len(bp.notarizedHdrs)) - - bp.mutNotarizedHdrs.RLock() - for shardId := range bp.notarizedHdrs { - hdr := bp.lastNotarizedHdrForShard(shardId) +func (bp *baseProcessor) getLastCrossNotarizedHeaders() []bootstrapStorage.BootstrapHeaderInfo { + lastCrossNotarizedHeaders := make([]bootstrapStorage.BootstrapHeaderInfo, 0, bp.shardCoordinator.NumberOfShards()+1) - hdrNonce := hdr.GetNonce() - if hdrNonce == 0 { - continue + for shardID := uint32(0); shardID < bp.shardCoordinator.NumberOfShards(); shardID++ { + bootstrapHeaderInfo := bp.getLastCrossNotarizedHeadersForShard(shardID) + if bootstrapHeaderInfo != nil { + lastCrossNotarizedHeaders = append(lastCrossNotarizedHeaders, *bootstrapHeaderInfo) } + } - hash, err := core.CalculateHash(bp.marshalizer, bp.hasher, hdr) - if err != nil { - continue - } + bootstrapHeaderInfo := bp.getLastCrossNotarizedHeadersForShard(sharding.MetachainShardId) + if bootstrapHeaderInfo != nil { + lastCrossNotarizedHeaders = append(lastCrossNotarizedHeaders, *bootstrapHeaderInfo) + } - headerInfo := bootstrapStorage.BootstrapHeaderInfo{ - ShardId: hdr.GetShardID(), - Nonce: hdrNonce, - Hash: hash, - } - lastNotarizedHdrs = append(lastNotarizedHdrs, headerInfo) + return bootstrapStorage.TrimHeaderInfoSlice(lastCrossNotarizedHeaders) +} + +func (bp *baseProcessor) getLastCrossNotarizedHeadersForShard(shardID uint32) *bootstrapStorage.BootstrapHeaderInfo { + lastCrossNotarizedHeader, lastCrossNotarizedHeaderHash, err := bp.blockTracker.GetLastCrossNotarizedHeader(shardID) + if err != nil { + log.Debug("getLastCrossNotarizedHeadersForShard", + "shard", shardID, + "error", err.Error()) + return nil } - bp.mutNotarizedHdrs.RUnlock() - return bootstrapStorage.TrimHeaderInfoSlice(lastNotarizedHdrs) + if lastCrossNotarizedHeader.GetNonce() == 0 { + return nil + } + + headerInfo := &bootstrapStorage.BootstrapHeaderInfo{ + ShardId: lastCrossNotarizedHeader.GetShardID(), + Nonce: lastCrossNotarizedHeader.GetNonce(), + Hash: lastCrossNotarizedHeaderHash, + } + + return headerInfo } func (bp *baseProcessor) commitAll() error { @@ -1131,3 +868,41 @@ func (bp *baseProcessor) commitAll() error { return nil } + +func 
deleteSelfReceiptsMiniBlocks(body block.Body) block.Body { + for i := 0; i < len(body); { + mb := body[i] + if mb.ReceiverShardID != mb.SenderShardID { + i++ + continue + } + + if mb.Type != block.ReceiptBlock && mb.Type != block.SmartContractResultBlock { + i++ + continue + } + + body[i] = body[len(body)-1] + body = body[:len(body)-1] + if i == len(body)-1 { + break + } + } + + return body +} + +func (bp *baseProcessor) getNoncesToFinal(headerHandler data.HeaderHandler) uint64 { + currentBlockNonce := uint64(0) + if !check.IfNil(headerHandler) { + currentBlockNonce = headerHandler.GetNonce() + } + + noncesToFinal := uint64(0) + finalBlockNonce := bp.forkDetector.GetHighestFinalBlockNonce() + if currentBlockNonce > finalBlockNonce { + noncesToFinal = currentBlockNonce - finalBlockNonce + } + + return noncesToFinal +} diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 6a76f9039b1..da2b742c95e 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -40,11 +40,9 @@ func generateTestCache() storage.Cacher { } func generateTestUnit() storage.Storer { - memDB, _ := memorydb.New() - storer, _ := storageUnit.NewStorageUnit( generateTestCache(), - memDB, + memorydb.New(), ) return storer @@ -105,15 +103,6 @@ func initDataPool(testHash []byte) *mock.PoolsHolderStub { TransactionsCalled: txCalled, UnsignedTransactionsCalled: unsignedTxCalled, RewardTransactionsCalled: rewardTransactionsCalled, - HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { - return &mock.Uint64SyncMapCacherStub{ - MergeCalled: func(u uint64, syncMap dataRetriever.ShardIdHashMap) {}, - HasCalled: func(nonce uint64, shardId uint32) bool { - return true - }, - RemoveCalled: func(nonce uint64, shardId uint32) {}, - } - }, MetaBlocksCalled: func() storage.Cacher { return &mock.CacherStub{ GetCalled: func(key []byte) (value interface{}, ok bool) { @@ -169,11 +158,14 @@ func initDataPool(testHash []byte) *mock.PoolsHolderStub { } return cs }, - HeadersCalled: func() storage.Cacher { - cs := &mock.CacherStub{} - cs.RegisterHandlerCalled = func(i func(key []byte)) { + HeadersCalled: func() dataRetriever.HeadersPool { + cs := &mock.HeadersCacherStub{} + cs.RegisterHandlerCalled = func(i func(header data.HeaderHandler, key []byte)) { } - cs.RemoveCalled = func(key []byte) { + cs.GetHeaderByHashCalled = func(hash []byte) (handler data.HeaderHandler, err error) { + return nil, err + } + cs.RemoveHeaderByHashCalled = func(key []byte) { } cs.LenCalled = func() int { return 0 @@ -181,7 +173,7 @@ func initDataPool(testHash []byte) *mock.PoolsHolderStub { cs.MaxSizeCalled = func() int { return 1000 } - cs.KeysCalled = func() [][]byte { + cs.NoncesCalled = func(shardId uint32) []uint64 { return nil } return cs @@ -193,33 +185,6 @@ func initDataPool(testHash []byte) *mock.PoolsHolderStub { func initMetaDataPool() *mock.MetaPoolsHolderStub { mdp := &mock.MetaPoolsHolderStub{ - MetaBlocksCalled: func() storage.Cacher { - return &mock.CacherStub{ - GetCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return nil - }, - LenCalled: func() int { - return 0 - }, - MaxSizeCalled: func() int { - return 1000 - }, - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - 
RegisterHandlerCalled: func(i func(key []byte)) {}, - RemoveCalled: func(key []byte) {}, - } - }, MiniBlocksCalled: func() storage.Cacher { cs := &mock.CacherStub{} cs.RegisterHandlerCalled = func(i func(key []byte)) { @@ -242,15 +207,15 @@ func initMetaDataPool() *mock.MetaPoolsHolderStub { } return cs }, - ShardHeadersCalled: func() storage.Cacher { - cs := &mock.CacherStub{} - cs.RegisterHandlerCalled = func(i func(key []byte)) { + HeadersCalled: func() dataRetriever.HeadersPool { + cs := &mock.HeadersCacherStub{} + cs.RegisterHandlerCalled = func(i func(header data.HeaderHandler, key []byte)) { } - cs.PeekCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal([]byte("hdr_hash1"), key) { - return &block.Header{Nonce: 1}, true + cs.GetHeaderByHashCalled = func(hash []byte) (handler data.HeaderHandler, e error) { + if bytes.Equal([]byte("hdr_hash1"), hash) { + return &block.Header{Nonce: 1}, nil } - return nil, false + return nil, errors.New("err") } cs.LenCalled = func() int { return 0 @@ -258,18 +223,12 @@ func initMetaDataPool() *mock.MetaPoolsHolderStub { cs.MaxSizeCalled = func() int { return 1000 } - cs.RemoveCalled = func(key []byte) {} - cs.KeysCalled = func() [][]byte { + cs.RemoveHeaderByHashCalled = func(key []byte) {} + cs.NoncesCalled = func(shardId uint32) []uint64 { return nil } return cs }, - HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { - cs := &mock.Uint64SyncMapCacherStub{} - cs.MergeCalled = func(u uint64, syncMap dataRetriever.ShardIdHashMap) {} - cs.RemoveCalled = func(nonce uint64, shardId uint32) {} - return cs - }, } return mdp } @@ -356,6 +315,13 @@ func CreateMockArguments() blproc.ArgShardProcessor { shardCoordinator, nodesCoordinator, ) + argsHeaderValidator := blproc.ArgsHeaderValidator{ + Hasher: &mock.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + } + headerValidator, _ := blproc.NewHeaderValidator(argsHeaderValidator) + + startHeaders := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) arguments := blproc.ArgShardProcessor{ ArgBaseProcessor: blproc.ArgBaseProcessor{ Accounts: &mock.AccountsStub{}, @@ -367,18 +333,20 @@ func CreateMockArguments() blproc.ArgShardProcessor { NodesCoordinator: nodesCoordinator, SpecialAddressHandler: specialAddressHandler, Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - StartHeaders: createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - RequestHandler: &mock.RequestHandlerMock{}, + RequestHandler: &mock.RequestHandlerStub{}, Core: &mock.ServiceContainerMock{}, BlockChainHook: &mock.BlockChainHookHandlerMock{}, TxCoordinator: &mock.TransactionCoordinatorMock{}, ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorMock{}, + EpochStartTrigger: &mock.EpochStartTriggerStub{}, + HeaderValidator: headerValidator, Rounder: &mock.RounderMock{}, BootStorer: &mock.BoostrapStorerMock{ PutCalled: func(round int64, bootData bootstrapStorage.BootstrapData) error { return nil }, }, + BlockTracker: mock.NewBlockTrackerMock(shardCoordinator, startHeaders), }, DataPool: initDataPool([]byte("")), TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, @@ -515,97 +483,7 @@ func TestBlockPorcessor_ComputeNewNoncePrevHashShouldWork(t *testing.T) { assert.Nil(t, err) } -func TestBaseProcessor_SetLastNotarizedHeadersSliceNil(t *testing.T) { - t.Parallel() - - base := blproc.NewBaseProcessor(mock.NewOneShardCoordinatorMock()) - - err := base.SetLastNotarizedHeadersSlice(nil) - - assert.Equal(t, process.ErrNotarizedHdrsSliceIsNil, err) -} - -func 
TestBaseProcessor_SetLastNotarizedHeadersSliceNotEnoughHeaders(t *testing.T) { - t.Parallel() - - base := blproc.NewBaseProcessor(mock.NewOneShardCoordinatorMock()) - - err := base.SetLastNotarizedHeadersSlice(make(map[uint32]data.HeaderHandler, 0)) - - assert.Equal(t, process.ErrWrongTypeAssertion, err) -} - -func TestBaseProcessor_SetLastNotarizedHeadersSliceOneShardWrongType(t *testing.T) { - t.Parallel() - - base := blproc.NewBaseProcessor(mock.NewOneShardCoordinatorMock()) - - lastNotHdrs := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - lastNotHdrs[0] = &block.MetaBlock{} - err := base.SetLastNotarizedHeadersSlice(lastNotHdrs) - - assert.Equal(t, process.ErrWrongTypeAssertion, err) -} - -func TestBaseProcessor_SetLastNotarizedHeadersSliceOneShardGood(t *testing.T) { - t.Parallel() - - base := blproc.NewBaseProcessor(mock.NewOneShardCoordinatorMock()) - - lastNotHdrs := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - err := base.SetLastNotarizedHeadersSlice(lastNotHdrs) - - assert.Nil(t, err) -} - -func TestBaseProcessor_SetLastNotarizedHeadersSliceOneShardMetaMissing(t *testing.T) { - t.Parallel() - - base := blproc.NewBaseProcessor(mock.NewOneShardCoordinatorMock()) - - lastNotHdrs := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - lastNotHdrs[sharding.MetachainShardId] = nil - err := base.SetLastNotarizedHeadersSlice(lastNotHdrs) - - assert.Equal(t, process.ErrWrongTypeAssertion, err) -} - -func TestBaseProcessor_SetLastNotarizedHeadersSliceOneShardMetaWrongType(t *testing.T) { - t.Parallel() - - base := blproc.NewBaseProcessor(mock.NewOneShardCoordinatorMock()) - - lastNotHdrs := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - lastNotHdrs[sharding.MetachainShardId] = &block.Header{} - err := base.SetLastNotarizedHeadersSlice(lastNotHdrs) - - assert.Equal(t, process.ErrWrongTypeAssertion, err) -} - -func TestBaseProcessor_SetLastNotarizedHeadersSliceMultiShardGood(t *testing.T) { - t.Parallel() - - base := blproc.NewBaseProcessor(mock.NewMultiShardsCoordinatorMock(5)) - - lastNotHdrs := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(5)) - err := base.SetLastNotarizedHeadersSlice(lastNotHdrs) - - assert.Nil(t, err) -} - -func TestBaseProcessor_SetLastNotarizedHeadersSliceMultiShardNotEnough(t *testing.T) { - t.Parallel() - - base := blproc.NewBaseProcessor(mock.NewMultiShardsCoordinatorMock(5)) - - lastNotHdrs := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(4)) - lastNotHdrs[sharding.MetachainShardId] = nil - err := base.SetLastNotarizedHeadersSlice(lastNotHdrs) - - assert.Equal(t, process.ErrWrongTypeAssertion, err) -} - -func createShardProcessHeadersToSaveLastNoterized( +func createShardProcessHeadersToSaveLastNotarized( highestNonce uint64, genesisHdr data.HeaderHandler, hasher hashing.Hasher, @@ -667,204 +545,168 @@ func createMetaProcessHeadersToSaveLastNoterized( return processedHdrs } -func TestBaseProcessor_SaveLastNoterizedHdrLastNotSliceNotSet(t *testing.T) { - t.Parallel() - - base := blproc.NewBaseProcessor(mock.NewMultiShardsCoordinatorMock(5)) - base.SetHasher(mock.HasherMock{}) - base.SetMarshalizer(&mock.MarshalizerMock{}) - prHdrs := createShardProcessHeadersToSaveLastNoterized(10, &block.Header{}, mock.HasherMock{}, &mock.MarshalizerMock{}) - - err := base.SaveLastNotarizedHeader(2, prHdrs) - - assert.Equal(t, process.ErrNotarizedHdrsSliceIsNil, err) -} - -func TestBaseProcessor_SaveLastNoterizedHdrLastNotShardIdMissmatch(t *testing.T) { +func 
TestBaseProcessor_SaveLastNotarizedInOneShardHdrsSliceForShardIsNil(t *testing.T) { t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) - base := blproc.NewBaseProcessor(shardCoordinator) - base.SetHasher(mock.HasherMock{}) - base.SetMarshalizer(&mock.MarshalizerMock{}) - _ = base.SetLastNotarizedHeadersSlice(createGenesisBlocks(shardCoordinator)) - prHdrs := createShardProcessHeadersToSaveLastNoterized(10, &block.Header{}, mock.HasherMock{}, &mock.MarshalizerMock{}) - - err := base.SaveLastNotarizedHeader(6, prHdrs) - - assert.Equal(t, process.ErrShardIdMissmatch, err) -} - -func TestBaseProcessor_SaveLastNoterizedHdrLastNotHdrNil(t *testing.T) { - t.Parallel() - - shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) - base := blproc.NewBaseProcessor(shardCoordinator) - base.SetHasher(mock.HasherMock{}) - base.SetMarshalizer(&mock.MarshalizerMock{}) - - // make it wrong - shardId := uint32(2) - genesisBlock := createGenesisBlocks(shardCoordinator) - genesisBlock[shardId] = nil - - _ = base.SetLastNotarizedHeadersSlice(genesisBlock) - prHdrs := createShardProcessHeadersToSaveLastNoterized(10, &block.Header{}, mock.HasherMock{}, &mock.MarshalizerMock{}) + arguments := CreateMockArguments() + arguments.Hasher = &mock.HasherMock{} + arguments.Marshalizer = &mock.MarshalizerMock{} + sp, _ := blproc.NewShardProcessor(arguments) + prHdrs := createShardProcessHeadersToSaveLastNotarized(10, &block.Header{}, mock.HasherMock{}, &mock.MarshalizerMock{}) - err := base.SaveLastNotarizedHeader(shardId, prHdrs) + err := sp.SaveLastNotarizedHeader(2, prHdrs) - assert.Equal(t, process.ErrWrongTypeAssertion, err) + assert.Equal(t, process.ErrNotarizedHeadersSliceForShardIsNil, err) } -func TestBaseProcessor_SaveLastNoterizedHdrLastNotWrongTypeShard(t *testing.T) { +func TestBaseProcessor_SaveLastNotarizedInMultiShardHdrsSliceForShardIsNil(t *testing.T) { t.Parallel() shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) - base := blproc.NewBaseProcessor(shardCoordinator) - base.SetHasher(mock.HasherMock{}) - base.SetMarshalizer(&mock.MarshalizerMock{}) - - // make it wrong - shardId := uint32(2) - genesisBlock := createGenesisBlocks(shardCoordinator) - genesisBlock[shardId] = &block.MetaBlock{Nonce: 0} + arguments := CreateMockArguments() + arguments.Hasher = &mock.HasherMock{} + arguments.Marshalizer = &mock.MarshalizerMock{} + arguments.ShardCoordinator = shardCoordinator + sp, _ := blproc.NewShardProcessor(arguments) - _ = base.SetLastNotarizedHeadersSlice(genesisBlock) - prHdrs := createShardProcessHeadersToSaveLastNoterized(10, &block.Header{}, mock.HasherMock{}, &mock.MarshalizerMock{}) + prHdrs := createShardProcessHeadersToSaveLastNotarized(10, &block.Header{}, mock.HasherMock{}, &mock.MarshalizerMock{}) - err := base.SaveLastNotarizedHeader(shardId, prHdrs) + err := sp.SaveLastNotarizedHeader(6, prHdrs) - assert.Equal(t, process.ErrWrongTypeAssertion, err) + assert.Equal(t, process.ErrNotarizedHeadersSliceForShardIsNil, err) } -func TestBaseProcessor_SaveLastNoterizedHdrLastNotWrongTypeMeta(t *testing.T) { +func TestBaseProcessor_SaveLastNotarizedHdrShardGood(t *testing.T) { t.Parallel() shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) - base := blproc.NewBaseProcessor(shardCoordinator) - base.SetHasher(mock.HasherMock{}) - base.SetMarshalizer(&mock.MarshalizerMock{}) + arguments := CreateMockArguments() + arguments.Hasher = &mock.HasherMock{} + arguments.Marshalizer = &mock.MarshalizerMock{} + arguments.ShardCoordinator = shardCoordinator + sp, _ := 
blproc.NewShardProcessor(arguments) + argsHeaderValidator := blproc.ArgsHeaderValidator{ + Hasher: arguments.Hasher, + Marshalizer: arguments.Marshalizer, + } + headerValidator, _ := blproc.NewHeaderValidator(argsHeaderValidator) + sp.SetHeaderValidator(headerValidator) - // make it wrong - genesisBlock := createGenesisBlocks(shardCoordinator) - genesisBlock[sharding.MetachainShardId] = &block.Header{Nonce: 0} + genesisBlcks := createGenesisBlocks(shardCoordinator) - _ = base.SetLastNotarizedHeadersSlice(genesisBlock) - prHdrs := createMetaProcessHeadersToSaveLastNoterized(10, &block.Header{}, mock.HasherMock{}, &mock.MarshalizerMock{}) + highestNonce := uint64(10) + shardId := uint32(0) + prHdrs := createShardProcessHeadersToSaveLastNotarized(highestNonce, genesisBlcks[shardId], arguments.Hasher, arguments.Marshalizer) - err := base.SaveLastNotarizedHeader(sharding.MetachainShardId, prHdrs) + err := sp.SaveLastNotarizedHeader(shardId, prHdrs) + assert.Nil(t, err) - assert.Equal(t, process.ErrWrongTypeAssertion, err) + assert.Equal(t, highestNonce, sp.LastNotarizedHdrForShard(shardId).GetNonce()) } -func TestBaseProcessor_SaveLastNoterizedHdrShardWrongProcessed(t *testing.T) { +func TestBaseProcessor_SaveLastNotarizedHdrMetaGood(t *testing.T) { t.Parallel() shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) - base := blproc.NewBaseProcessor(shardCoordinator) - base.SetHasher(mock.HasherMock{}) - base.SetMarshalizer(&mock.MarshalizerMock{}) - _ = base.SetLastNotarizedHeadersSlice(createGenesisBlocks(shardCoordinator)) - highestNonce := uint64(10) - prHdrs := createMetaProcessHeadersToSaveLastNoterized(highestNonce, &block.Header{}, mock.HasherMock{}, &mock.MarshalizerMock{}) - - shardId := uint32(0) - err := base.SaveLastNotarizedHeader(shardId, prHdrs) - assert.Equal(t, process.ErrWrongTypeAssertion, err) + arguments := CreateMockArguments() + arguments.Hasher = &mock.HasherMock{} + arguments.Marshalizer = &mock.MarshalizerMock{} + arguments.ShardCoordinator = shardCoordinator + sp, _ := blproc.NewShardProcessor(arguments) - notarizedHdrs := base.NotarizedHdrs() - assert.Equal(t, uint64(0), notarizedHdrs[shardId][0].GetNonce()) -} + argsHeaderValidator := blproc.ArgsHeaderValidator{ + Hasher: arguments.Hasher, + Marshalizer: arguments.Marshalizer, + } + headerValidator, _ := blproc.NewHeaderValidator(argsHeaderValidator) + sp.SetHeaderValidator(headerValidator) -func TestBaseProcessor_SaveLastNoterizedHdrMetaWrongProcessed(t *testing.T) { - t.Parallel() + genesisBlcks := createGenesisBlocks(shardCoordinator) - shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) - base := blproc.NewBaseProcessor(shardCoordinator) - base.SetHasher(mock.HasherMock{}) - base.SetMarshalizer(&mock.MarshalizerMock{}) - _ = base.SetLastNotarizedHeadersSlice(createGenesisBlocks(shardCoordinator)) highestNonce := uint64(10) - prHdrs := createShardProcessHeadersToSaveLastNoterized(highestNonce, &block.Header{}, mock.HasherMock{}, &mock.MarshalizerMock{}) + prHdrs := createMetaProcessHeadersToSaveLastNoterized(highestNonce, genesisBlcks[sharding.MetachainShardId], arguments.Hasher, arguments.Marshalizer) - err := base.SaveLastNotarizedHeader(sharding.MetachainShardId, prHdrs) - assert.Equal(t, process.ErrWrongTypeAssertion, err) + err := sp.SaveLastNotarizedHeader(sharding.MetachainShardId, prHdrs) + assert.Nil(t, err) - notarizedHdrs := base.NotarizedHdrs() - assert.Equal(t, uint64(0), notarizedHdrs[sharding.MetachainShardId][0].GetNonce()) + assert.Equal(t, highestNonce, 
sp.LastNotarizedHdrForShard(sharding.MetachainShardId).GetNonce()) } -func TestBaseProcessor_SaveLastNoterizedHdrShardGood(t *testing.T) { +func TestShardProcessor_ProcessBlockEpochDoesNotMatchShouldErr(t *testing.T) { t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) - base := blproc.NewBaseProcessor(shardCoordinator) - hasher := mock.HasherMock{} - base.SetHasher(hasher) - marshalizer := &mock.MarshalizerMock{} - base.SetMarshalizer(marshalizer) - genesisBlcks := createGenesisBlocks(shardCoordinator) - _ = base.SetLastNotarizedHeadersSlice(genesisBlcks) - - highestNonce := uint64(10) - shardId := uint32(0) - prHdrs := createShardProcessHeadersToSaveLastNoterized(highestNonce, genesisBlcks[shardId], hasher, marshalizer) + arguments := CreateMockArgumentsMultiShard() + sp, _ := blproc.NewShardProcessor(arguments) + blockChain := &mock.BlockChainMock{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return &block.Header{ + Epoch: 2, + } + }, + } + header := &block.Header{Round: 10, Nonce: 1} - err := base.SaveLastNotarizedHeader(shardId, prHdrs) - assert.Nil(t, err) + blk := make(block.Body, 0) + err := sp.ProcessBlock(blockChain, header, blk, func() time.Duration { return time.Second }) - assert.Equal(t, highestNonce, base.LastNotarizedHdrForShard(shardId).GetNonce()) + assert.Equal(t, process.ErrEpochDoesNotMatch, err) } -func TestBaseProcessor_SaveLastNoterizedHdrMetaGood(t *testing.T) { +func TestShardProcessor_ProcessBlockEpochDoesNotMatchShouldErr2(t *testing.T) { t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) - base := blproc.NewBaseProcessor(shardCoordinator) - hasher := mock.HasherMock{} - base.SetHasher(hasher) - marshalizer := &mock.MarshalizerMock{} - base.SetMarshalizer(marshalizer) - genesisBlcks := createGenesisBlocks(shardCoordinator) - _ = base.SetLastNotarizedHeadersSlice(genesisBlcks) + arguments := CreateMockArgumentsMultiShard() + arguments.EpochStartTrigger = &mock.EpochStartTriggerStub{ + EpochCalled: func() uint32 { + return 1 + }, + } - highestNonce := uint64(10) - prHdrs := createMetaProcessHeadersToSaveLastNoterized(highestNonce, genesisBlcks[sharding.MetachainShardId], hasher, marshalizer) + randSeed := []byte("randseed") + sp, _ := blproc.NewShardProcessor(arguments) + blockChain := &mock.BlockChainMock{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return &block.Header{ + Epoch: 1, + RandSeed: randSeed, + } + }, + } + header := &block.Header{Round: 10, Nonce: 1, Epoch: 5, RandSeed: randSeed, PrevRandSeed: randSeed} - err := base.SaveLastNotarizedHeader(sharding.MetachainShardId, prHdrs) - assert.Nil(t, err) + blk := make(block.Body, 0) + err := sp.ProcessBlock(blockChain, header, blk, func() time.Duration { return time.Second }) - assert.Equal(t, highestNonce, base.LastNotarizedHdrForShard(sharding.MetachainShardId).GetNonce()) + assert.Equal(t, process.ErrEpochDoesNotMatch, err) } -func TestBaseProcessor_RemoveLastNotarizedShouldNotDeleteTheLastRecord(t *testing.T) { +func TestShardProcessor_ProcessBlockEpochDoesNotMatchShouldErr3(t *testing.T) { t.Parallel() - nrShards := uint32(5) - shardCoordinator := mock.NewMultiShardsCoordinatorMock(nrShards) - base := blproc.NewBaseProcessor(shardCoordinator) - hasher := mock.HasherMock{} - base.SetHasher(hasher) - marshalizer := &mock.MarshalizerMock{} - base.SetMarshalizer(marshalizer) - genesisBlcks := createGenesisBlocks(shardCoordinator) - _ = base.SetLastNotarizedHeadersSlice(genesisBlcks) - - for i := uint32(0); i < nrShards; i++ { - 
base.AddLastNotarizedHdr(i, &block.Header{Nonce: 1}) + arguments := CreateMockArgumentsMultiShard() + arguments.EpochStartTrigger = &mock.EpochStartTriggerStub{ + EpochCalled: func() uint32 { + return 2 + }, + IsEpochStartCalled: func() bool { + return true + }, } - base.RemoveLastNotarized() - - for i := uint32(0); i < nrShards; i++ { - hdr := base.LastNotarizedHdrForShard(i) - assert.Equal(t, genesisBlcks[i], hdr) + randSeed := []byte("randseed") + sp, _ := blproc.NewShardProcessor(arguments) + blockChain := &mock.BlockChainMock{ + GetCurrentBlockHeaderCalled: func() data.HeaderHandler { + return &block.Header{ + Epoch: 3, + RandSeed: randSeed, + } + }, } + header := &block.Header{Round: 10, Nonce: 1, Epoch: 5, RandSeed: randSeed, PrevRandSeed: randSeed} - base.RemoveLastNotarized() + blk := make(block.Body, 0) + err := sp.ProcessBlock(blockChain, header, blk, func() time.Duration { return time.Second }) - for i := uint32(0); i < nrShards; i++ { - hdr := base.LastNotarizedHdrForShard(i) - assert.Equal(t, genesisBlcks[i], hdr) - } + assert.Equal(t, process.ErrEpochDoesNotMatch, err) } diff --git a/process/block/bootstrapStorage/bootstrapStorer.go b/process/block/bootstrapStorage/bootstrapStorer.go index 129ffd7522a..7e4052e33d3 100644 --- a/process/block/bootstrapStorage/bootstrapStorer.go +++ b/process/block/bootstrapStorage/bootstrapStorer.go @@ -3,6 +3,7 @@ package bootstrapStorage import ( "errors" "strconv" + "sync/atomic" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/marshal" @@ -33,12 +34,12 @@ type BootstrapHeaderInfo struct { // BootstrapData is used to store information that are needed for bootstrap type BootstrapData struct { - LastHeader BootstrapHeaderInfo - LastNotarizedHeaders []BootstrapHeaderInfo - LastFinals []BootstrapHeaderInfo - ProcessedMiniBlocks []MiniBlocksInMeta - HighestFinalNonce uint64 - LastRound int64 + LastHeader BootstrapHeaderInfo + LastCrossNotarizedHeaders []BootstrapHeaderInfo + LastSelfNotarizedHeaders []BootstrapHeaderInfo + ProcessedMiniBlocks []MiniBlocksInMeta + HighestFinalBlockNonce uint64 + LastRound int64 } type bootstrapStorer struct { @@ -70,7 +71,7 @@ func NewBootstrapStorer( // Put will save bootData in storage func (bs *bootstrapStorer) Put(round int64, bootData BootstrapData) error { - bootData.LastRound = bs.lastRound + bootData.LastRound = atomic.LoadInt64(&bs.lastRound) // save bootstrap round information bootDataBytes, err := bs.marshalizer.Marshal(&bootData) @@ -95,7 +96,7 @@ func (bs *bootstrapStorer) Put(round int64, bootData BootstrapData) error { return err } - bs.lastRound = round + atomic.StoreInt64(&bs.lastRound, round) return nil } @@ -135,7 +136,7 @@ func (bs *bootstrapStorer) GetHighestRound() int64 { // SaveLastRound will save the last round func (bs *bootstrapStorer) SaveLastRound(round int64) error { - bs.lastRound = round + atomic.StoreInt64(&bs.lastRound, round) // save round with a static key roundBytes, err := bs.marshalizer.Marshal(&round) diff --git a/process/block/bootstrapStorage/bootstrapStorer_test.go b/process/block/bootstrapStorage/bootstrapStorer_test.go index 6669b993b99..afb2dd0a6b2 100644 --- a/process/block/bootstrapStorage/bootstrapStorer_test.go +++ b/process/block/bootstrapStorage/bootstrapStorer_test.go @@ -50,9 +50,9 @@ func TestBootstrapStorer_PutAndGet(t *testing.T) { headerInfo := bootstrapStorage.BootstrapHeaderInfo{2, 3, []byte("Hash")} dataBoot := bootstrapStorage.BootstrapData{ - LastHeader: headerInfo, - LastNotarizedHeaders: 
[]bootstrapStorage.BootstrapHeaderInfo{headerInfo}, - LastFinals: []bootstrapStorage.BootstrapHeaderInfo{headerInfo}, + LastHeader: headerInfo, + LastCrossNotarizedHeaders: []bootstrapStorage.BootstrapHeaderInfo{headerInfo}, + LastSelfNotarizedHeaders: []bootstrapStorage.BootstrapHeaderInfo{headerInfo}, } err := bt.Put(round, dataBoot) diff --git a/process/block/displayBlock.go b/process/block/displayBlock.go index af7997f2ec8..9ba860db9a6 100644 --- a/process/block/displayBlock.go +++ b/process/block/displayBlock.go @@ -91,6 +91,7 @@ func (txc *transactionCounter) displayLogInfo( selfId uint32, dataPool dataRetriever.PoolsHolder, appStatusHandler core.AppStatusHandler, + blockTracker process.BlockTracker, ) { dispHeader, dispLines := txc.createDisplayableShardHeaderAndBlockBody(header, body) @@ -115,6 +116,8 @@ func (txc *transactionCounter) displayLogInfo( } txc.mutex.RUnlock() log.Debug(message, arguments...) + + blockTracker.DisplayTrackedHeaders() } func (txc *transactionCounter) createDisplayableShardHeaderAndBlockBody( diff --git a/process/block/displayMetaBlock.go b/process/block/displayMetaBlock.go index 189c08c3ee7..4e294aa2565 100644 --- a/process/block/displayMetaBlock.go +++ b/process/block/displayMetaBlock.go @@ -6,6 +6,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/display" + "github.com/ElrondNetwork/elrond-go/process" ) type headersCounter struct { @@ -26,8 +27,13 @@ func NewHeaderCounter() *headersCounter { func (hc *headersCounter) subtractRestoredMBHeaders(numMiniBlockHeaders int) { hc.shardMBHeaderCounterMutex.Lock() + defer hc.shardMBHeaderCounterMutex.Unlock() + if hc.shardMBHeadersTotalProcessed < uint64(numMiniBlockHeaders) { + hc.shardMBHeadersTotalProcessed = 0 + return + } + hc.shardMBHeadersTotalProcessed -= uint64(numMiniBlockHeaders) - hc.shardMBHeaderCounterMutex.Unlock() } func (hc *headersCounter) countShardMBHeaders(numShardMBHeaders int) { @@ -53,6 +59,7 @@ func (hc *headersCounter) displayLogInfo( body block.Body, headerHash []byte, numHeadersFromPool int, + blockTracker process.BlockTracker, ) { hc.calculateNumOfShardMBHeaders(header) @@ -75,6 +82,8 @@ func (hc *headersCounter) displayLogInfo( hc.shardMBHeaderCounterMutex.RUnlock() log.Debug(message, arguments...) 
+ + blockTracker.DisplayTrackedHeaders() } func (hc *headersCounter) createDisplayableMetaHeader( diff --git a/process/block/export_test.go b/process/block/export_test.go index 24068f4488d..4138273eddb 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -5,6 +5,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -32,12 +33,12 @@ func (bp *baseProcessor) CheckBlockValidity( return bp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) } -func (sp *shardProcessor) ReceivedMetaBlock(metaBlockHash []byte) { - sp.receivedMetaBlock(metaBlockHash) +func (sp *shardProcessor) ReceivedMetaBlock(header data.HeaderHandler, metaBlockHash []byte) { + sp.receivedMetaBlock(header, metaBlockHash) } -func (sp *shardProcessor) CreateMiniBlocks(maxItemsInBlock uint32, round uint64, haveTime func() bool) (block.Body, error) { - return sp.createMiniBlocks(maxItemsInBlock, round, haveTime) +func (sp *shardProcessor) CreateMiniBlocks(maxItemsInBlock uint32, haveTime func() bool) (block.Body, error) { + return sp.createMiniBlocks(maxItemsInBlock, haveTime) } func (sp *shardProcessor) GetOrderedProcessedMetaBlocksFromHeader(header *block.Header) ([]data.HeaderHandler, error) { @@ -56,6 +57,13 @@ func NewShardProcessorEmptyWith3shards(tdp dataRetriever.PoolsHolder, genesisBlo shardCoordinator, nodesCoordinator, ) + + argsHeaderValidator := ArgsHeaderValidator{ + Hasher: &mock.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + } + headerValidator, _ := NewHeaderValidator(argsHeaderValidator) + arguments := ArgShardProcessor{ ArgBaseProcessor: ArgBaseProcessor{ Accounts: &mock.AccountsStub{}, @@ -67,18 +75,20 @@ func NewShardProcessorEmptyWith3shards(tdp dataRetriever.PoolsHolder, genesisBlo NodesCoordinator: nodesCoordinator, SpecialAddressHandler: specialAddressHandler, Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - StartHeaders: genesisBlocks, - RequestHandler: &mock.RequestHandlerMock{}, + RequestHandler: &mock.RequestHandlerStub{}, Core: &mock.ServiceContainerMock{}, BlockChainHook: &mock.BlockChainHookHandlerMock{}, TxCoordinator: &mock.TransactionCoordinatorMock{}, ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorMock{}, + EpochStartTrigger: &mock.EpochStartTriggerStub{}, + HeaderValidator: headerValidator, Rounder: &mock.RounderMock{}, BootStorer: &mock.BoostrapStorerMock{ PutCalled: func(round int64, bootData bootstrapStorage.BootstrapData) error { return nil }, }, + BlockTracker: mock.NewBlockTrackerMock(shardCoordinator, genesisBlocks), }, DataPool: tdp, TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, @@ -95,8 +105,8 @@ func (mp *metaProcessor) RemoveBlockInfoFromPool(header *block.MetaBlock) error return mp.removeBlockInfoFromPool(header) } -func (mp *metaProcessor) ReceivedShardHeader(shardHeaderHash []byte) { - mp.receivedShardHeader(shardHeaderHash) +func (mp *metaProcessor) ReceivedShardHeader(header data.HeaderHandler, shardHeaderHash []byte) { + mp.receivedShardHeader(header, shardHeaderHash) } func (mp *metaProcessor) AddHdrHashToRequestedList(hdr *block.Header, hdrHash []byte) { @@ -135,6 +145,14 @@ func (mp *metaProcessor) ProcessBlockHeaders(header *block.MetaBlock, round uint return mp.processBlockHeaders(header, round, haveTime) } +func (mp *metaProcessor) CreateEpochStartForMetablock() (*block.EpochStart, error) { + return 
mp.createEpochStartForMetablock() +} + +func (mp *metaProcessor) GetLastFinalizedMetaHashForShard(shardHdr *block.Header) ([]byte, []byte, error) { + return mp.getLastFinalizedMetaHashForShard(shardHdr) +} + func (mp *metaProcessor) RequestMissingFinalityAttestingShardHeaders() uint32 { mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() defer mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() @@ -143,15 +161,29 @@ func (mp *metaProcessor) RequestMissingFinalityAttestingShardHeaders() uint32 { } func (bp *baseProcessor) NotarizedHdrs() map[uint32][]data.HeaderHandler { - return bp.notarizedHdrs -} + lastCrossNotarizedHeaders := make(map[uint32][]data.HeaderHandler) + for shardID := uint32(0); shardID < bp.shardCoordinator.NumberOfShards(); shardID++ { + lastCrossNotarizedHeaderForShard := bp.LastNotarizedHdrForShard(shardID) + if !check.IfNil(lastCrossNotarizedHeaderForShard) { + lastCrossNotarizedHeaders[shardID] = append(lastCrossNotarizedHeaders[shardID], lastCrossNotarizedHeaderForShard) + } + } + + lastCrossNotarizedHeaderForShard := bp.LastNotarizedHdrForShard(sharding.MetachainShardId) + if !check.IfNil(lastCrossNotarizedHeaderForShard) { + lastCrossNotarizedHeaders[sharding.MetachainShardId] = append(lastCrossNotarizedHeaders[sharding.MetachainShardId], lastCrossNotarizedHeaderForShard) + } -func (bp *baseProcessor) LastNotarizedHdrForShard(shardId uint32) data.HeaderHandler { - return bp.lastNotarizedHdrForShard(shardId) + return lastCrossNotarizedHeaders } -func (bp *baseProcessor) RemoveLastNotarized() { - bp.removeLastNotarized() +func (bp *baseProcessor) LastNotarizedHdrForShard(shardID uint32) data.HeaderHandler { + lastCrossNotarizedHeaderForShard, _, _ := bp.blockTracker.GetLastCrossNotarizedHeader(shardID) + if check.IfNil(lastCrossNotarizedHeaderForShard) { + return nil + } + + return lastCrossNotarizedHeaderForShard } func (bp *baseProcessor) SetMarshalizer(marshal marshal.Marshalizer) { @@ -162,6 +194,10 @@ func (bp *baseProcessor) SetHasher(hasher hashing.Hasher) { bp.hasher = hasher } +func (bp *baseProcessor) SetHeaderValidator(validator process.HeaderConstructionValidator) { + bp.headerValidator = validator +} + func (mp *metaProcessor) SetShardBlockFinality(val uint32) { mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() mp.shardBlockFinality = val @@ -172,8 +208,8 @@ func (mp *metaProcessor) SaveLastNotarizedHeader(header *block.MetaBlock) error return mp.saveLastNotarizedHeader(header) } -func (mp *metaProcessor) CheckShardHeadersValidity() (map[uint32]data.HeaderHandler, error) { - return mp.checkShardHeadersValidity() +func (mp *metaProcessor) CheckShardHeadersValidity(header *block.MetaBlock) (map[uint32]data.HeaderHandler, error) { + return mp.checkShardHeadersValidity(header) } func (mp *metaProcessor) CheckShardHeadersFinality(highestNonceHdrs map[uint32]data.HeaderHandler) error { @@ -181,11 +217,7 @@ func (mp *metaProcessor) CheckShardHeadersFinality(highestNonceHdrs map[uint32]d } func (bp *baseProcessor) IsHdrConstructionValid(currHdr, prevHdr data.HeaderHandler) error { - return bp.isHdrConstructionValid(currHdr, prevHdr) -} - -func (mp *metaProcessor) IsShardHeaderValidFinal(currHdr *block.Header, lastHdr *block.Header, sortedShardHdrs []*block.Header) (bool, []uint32) { - return mp.isShardHeaderValidFinal(currHdr, lastHdr, sortedShardHdrs) + return bp.headerValidator.IsHeaderConstructionValid(currHdr, prevHdr) } func (mp *metaProcessor) ChRcvAllHdrs() chan bool { @@ -204,26 +236,18 @@ func NewBaseProcessor(shardCord sharding.Coordinator) *baseProcessor { return 
&baseProcessor{shardCoordinator: shardCord} } -func (bp *baseProcessor) SaveLastNotarizedHeader(shardId uint32, processedHdrs []data.HeaderHandler) error { - return bp.saveLastNotarizedHeader(shardId, processedHdrs) +func (sp *shardProcessor) SaveLastNotarizedHeader(shardId uint32, processedHdrs []data.HeaderHandler) error { + return sp.saveLastNotarizedHeader(shardId, processedHdrs) } func (sp *shardProcessor) CheckHeaderBodyCorrelation(hdr *block.Header, body block.Body) error { return sp.checkHeaderBodyCorrelation(hdr.MiniBlockHeaders, body) } -func (bp *baseProcessor) SetLastNotarizedHeadersSlice(startHeaders map[uint32]data.HeaderHandler) error { - return bp.setLastNotarizedHeadersSlice(startHeaders) -} - func (sp *shardProcessor) CheckAndRequestIfMetaHeadersMissing(round uint64) { sp.checkAndRequestIfMetaHeadersMissing(round) } -func (sp *shardProcessor) IsMetaHeaderFinal(currHdr data.HeaderHandler, sortedHdrs []*hashAndHdr, startPos int) bool { - return sp.isMetaHeaderFinal(currHdr, sortedHdrs, startPos) -} - func (sp *shardProcessor) GetHashAndHdrStruct(header data.HeaderHandler, hash []byte) *hashAndHdr { return &hashAndHdr{header, hash} } @@ -235,24 +259,18 @@ func (sp *shardProcessor) RequestMissingFinalityAttestingHeaders() uint32 { return sp.requestMissingFinalityAttestingHeaders( sharding.MetachainShardId, sp.metaBlockFinality, - sp.getMetaHeaderFromPoolWithNonce, - sp.dataPool.MetaBlocks()) + ) } func (sp *shardProcessor) CheckMetaHeadersValidityAndFinality() error { return sp.checkMetaHeadersValidityAndFinality() } -func (sp *shardProcessor) GetOrderedMetaBlocks(round uint64) ([]*hashAndHdr, error) { - return sp.getOrderedMetaBlocks(round) -} - func (sp *shardProcessor) CreateAndProcessCrossMiniBlocksDstMe( maxItemsInBlock uint32, - round uint64, haveTime func() bool, ) (block.MiniBlockSlice, uint32, uint32, error) { - return sp.createAndProcessCrossMiniBlocksDstMe(maxItemsInBlock, round, haveTime) + return sp.createAndProcessCrossMiniBlocksDstMe(maxItemsInBlock, haveTime) } func (bp *baseProcessor) SetBlockSizeThrottler(blockSizeThrottler process.BlockSizeThrottler) { @@ -267,8 +285,9 @@ func (sp *shardProcessor) DisplayLogInfo( selfId uint32, dataPool dataRetriever.PoolsHolder, statusHandler core.AppStatusHandler, + blockTracker process.BlockTracker, ) { - sp.txCounter.displayLogInfo(header, body, headerHash, numShards, selfId, dataPool, statusHandler) + sp.txCounter.displayLogInfo(header, body, headerHash, numShards, selfId, dataPool, statusHandler, blockTracker) } func (sp *shardProcessor) GetHighestHdrForOwnShardFromMetachain(processedHdrs []data.HeaderHandler) ([]data.HeaderHandler, [][]byte, error) { @@ -288,14 +307,6 @@ func (sp *shardProcessor) GetAllMiniBlockDstMeFromMeta( return sp.getAllMiniBlockDstMeFromMeta(header) } -func (sp *shardProcessor) IsMiniBlockProcessed(metaBlockHash []byte, miniBlockHash []byte) bool { - return sp.isMiniBlockProcessed(metaBlockHash, miniBlockHash) -} - -func (sp *shardProcessor) AddProcessedMiniBlock(metaBlockHash []byte, miniBlockHash []byte) { - sp.addProcessedMiniBlock(metaBlockHash, miniBlockHash) -} - func (bp *baseProcessor) SetHdrForCurrentBlock(headerHash []byte, headerHandler data.HeaderHandler, usedInBlock bool) { bp.hdrsForCurrBlock.mutHdrsForBlock.Lock() bp.hdrsForCurrBlock.hdrHashAndInfo[string(headerHash)] = &hdrInfo{hdr: headerHandler, usedInBlock: usedInBlock} diff --git a/process/block/headerValidator.go b/process/block/headerValidator.go new file mode 100644 index 00000000000..726920ba344 --- /dev/null +++ 
b/process/block/headerValidator.go @@ -0,0 +1,94 @@ +package block + +import ( + "bytes" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" +) + +// ArgsHeaderValidator are the arguments needed to create a new header validator +type ArgsHeaderValidator struct { + Hasher hashing.Hasher + Marshalizer marshal.Marshalizer +} + +type headerValidator struct { + hasher hashing.Hasher + marshalizer marshal.Marshalizer +} + +// NewHeaderValidator returns a new header validator +func NewHeaderValidator(args ArgsHeaderValidator) (*headerValidator, error) { + if check.IfNil(args.Hasher) { + return nil, process.ErrNilHasher + } + if check.IfNil(args.Marshalizer) { + return nil, process.ErrNilMarshalizer + } + + return &headerValidator{ + hasher: args.Hasher, + marshalizer: args.Marshalizer, + }, nil +} + +// IsHeaderConstructionValid verifies if the given header is constructed correctly on top of the previous one +func (h *headerValidator) IsHeaderConstructionValid(currHeader, prevHeader data.HeaderHandler) error { + if check.IfNil(prevHeader) { + return process.ErrNilBlockHeader + } + if check.IfNil(currHeader) { + return process.ErrNilBlockHeader + } + + if prevHeader.GetRound() >= currHeader.GetRound() { + log.Trace("round does not match", + "shard", currHeader.GetShardID(), + "local header round", prevHeader.GetRound(), + "received round", currHeader.GetRound()) + return process.ErrLowerRoundInBlock + } + + if currHeader.GetNonce() != prevHeader.GetNonce()+1 { + log.Trace("nonce does not match", + "shard", currHeader.GetShardID(), + "local header nonce", prevHeader.GetNonce(), + "received nonce", currHeader.GetNonce()) + return process.ErrWrongNonceInBlock + } + + prevHeaderHash, err := core.CalculateHash(h.marshalizer, h.hasher, prevHeader) + if err != nil { + return err + } + + if !bytes.Equal(currHeader.GetPrevHash(), prevHeaderHash) { + log.Trace("header hash does not match", + "shard", currHeader.GetShardID(), + "local header hash", prevHeaderHash, + "received header with prev hash", currHeader.GetPrevHash(), + ) + return process.ErrBlockHashDoesNotMatch + } + + if !bytes.Equal(currHeader.GetPrevRandSeed(), prevHeader.GetRandSeed()) { + log.Trace("header random seed does not match", + "shard", currHeader.GetShardID(), + "local header random seed", prevHeader.GetRandSeed(), + "received header with prev random seed", currHeader.GetPrevRandSeed(), + ) + return process.ErrRandSeedDoesNotMatch + } + + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (h *headerValidator) IsInterfaceNil() bool { + return h == nil +} diff --git a/process/block/metablock.go b/process/block/metablock.go index 1e142626ddb..646573cdace 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -3,7 +3,6 @@ package block import ( "bytes" "fmt" - "sort" "sync" "time" @@ -13,10 +12,10 @@ import ( "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" "github.com/ElrondNetwork/elrond-go/node/external" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" "github.com/ElrondNetwork/elrond-go/process/throttle"
"github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/statusHandler" @@ -25,11 +24,12 @@ import ( // metaProcessor implements metaProcessor interface and actually it tries to execute block type metaProcessor struct { *baseProcessor - core serviceContainer.Core - dataPool dataRetriever.MetaPoolsHolder - scDataGetter external.SCQueryService - scToProtocol process.SmartContractToProtocolHandler - peerChanges process.PeerChangesHandler + core serviceContainer.Core + dataPool dataRetriever.MetaPoolsHolder + scDataGetter external.SCQueryService + scToProtocol process.SmartContractToProtocolHandler + peerChanges process.PeerChangesHandler + pendingMiniBlocks process.PendingMiniBlocksHandler shardsHeadersNonce *sync.Map shardBlockFinality uint32 @@ -43,22 +43,24 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { if err != nil { return nil, err } - - if arguments.DataPool == nil || arguments.DataPool.IsInterfaceNil() { + if check.IfNil(arguments.DataPool) { return nil, process.ErrNilDataPoolHolder } - if arguments.DataPool.ShardHeaders() == nil || arguments.DataPool.ShardHeaders().IsInterfaceNil() { + if check.IfNil(arguments.DataPool.Headers()) { return nil, process.ErrNilHeadersDataPool } - if arguments.SCDataGetter == nil || arguments.SCDataGetter.IsInterfaceNil() { + if check.IfNil(arguments.SCDataGetter) { return nil, process.ErrNilSCDataGetter } - if arguments.PeerChangesHandler == nil || arguments.PeerChangesHandler.IsInterfaceNil() { + if check.IfNil(arguments.PeerChangesHandler) { return nil, process.ErrNilPeerChangesHandler } - if arguments.SCToProtocol == nil || arguments.SCToProtocol.IsInterfaceNil() { + if check.IfNil(arguments.SCToProtocol) { return nil, process.ErrNilSCToProtocol } + if check.IfNil(arguments.PendingMiniBlocks) { + return nil, process.ErrNilPendingMiniBlocksHandler + } blockSizeThrottler, err := throttle.NewBlockSizeThrottle() if err != nil { @@ -66,39 +68,37 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { } base := &baseProcessor{ - accounts: arguments.Accounts, - blockSizeThrottler: blockSizeThrottler, - forkDetector: arguments.ForkDetector, - hasher: arguments.Hasher, - marshalizer: arguments.Marshalizer, - store: arguments.Store, - shardCoordinator: arguments.ShardCoordinator, - nodesCoordinator: arguments.NodesCoordinator, - specialAddressHandler: arguments.SpecialAddressHandler, - uint64Converter: arguments.Uint64Converter, - onRequestHeaderHandler: arguments.RequestHandler.RequestHeader, - onRequestHeaderHandlerByNonce: arguments.RequestHandler.RequestHeaderByNonce, - appStatusHandler: statusHandler.NewNilStatusHandler(), - blockChainHook: arguments.BlockChainHook, - txCoordinator: arguments.TxCoordinator, - validatorStatisticsProcessor: arguments.ValidatorStatisticsProcessor, - rounder: arguments.Rounder, - bootStorer: arguments.BootStorer, - } - - err = base.setLastNotarizedHeadersSlice(arguments.StartHeaders) - if err != nil { - return nil, err + accounts: arguments.Accounts, + blockSizeThrottler: blockSizeThrottler, + forkDetector: arguments.ForkDetector, + hasher: arguments.Hasher, + marshalizer: arguments.Marshalizer, + store: arguments.Store, + shardCoordinator: arguments.ShardCoordinator, + nodesCoordinator: arguments.NodesCoordinator, + specialAddressHandler: arguments.SpecialAddressHandler, + uint64Converter: arguments.Uint64Converter, + requestHandler: arguments.RequestHandler, + appStatusHandler: statusHandler.NewNilStatusHandler(), + blockChainHook: 
arguments.BlockChainHook, + txCoordinator: arguments.TxCoordinator, + validatorStatisticsProcessor: arguments.ValidatorStatisticsProcessor, + epochStartTrigger: arguments.EpochStartTrigger, + headerValidator: arguments.HeaderValidator, + rounder: arguments.Rounder, + bootStorer: arguments.BootStorer, + blockTracker: arguments.BlockTracker, } mp := metaProcessor{ - core: arguments.Core, - baseProcessor: base, - dataPool: arguments.DataPool, - headersCounter: NewHeaderCounter(), - scDataGetter: arguments.SCDataGetter, - peerChanges: arguments.PeerChangesHandler, - scToProtocol: arguments.SCToProtocol, + core: arguments.Core, + baseProcessor: base, + dataPool: arguments.DataPool, + headersCounter: NewHeaderCounter(), + scDataGetter: arguments.SCDataGetter, + peerChanges: arguments.PeerChangesHandler, + scToProtocol: arguments.SCToProtocol, + pendingMiniBlocks: arguments.PendingMiniBlocks, } mp.baseProcessor.requestBlockBodyHandler = &mp @@ -106,17 +106,15 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { mp.hdrsForCurrBlock.hdrHashAndInfo = make(map[string]*hdrInfo) mp.hdrsForCurrBlock.highestHdrNonce = make(map[uint32]uint64) - headerPool := mp.dataPool.ShardHeaders() + headerPool := mp.dataPool.Headers() headerPool.RegisterHandler(mp.receivedShardHeader) mp.chRcvAllHdrs = make(chan bool) - mp.shardBlockFinality = process.ShardBlockFinality + mp.shardBlockFinality = process.BlockFinality mp.shardsHeadersNonce = &sync.Map{} - mp.lastHdrs = make(mapShardHeader) - return &mp, nil } @@ -140,7 +138,7 @@ func (mp *metaProcessor) ProcessBlock( "for shard", headerHandler.GetShardID(), ) - go mp.onRequestHeaderHandler(headerHandler.GetShardID(), headerHandler.GetPrevHash()) + go mp.requestHandler.RequestMetaHeader(headerHandler.GetPrevHash()) } return err @@ -169,7 +167,7 @@ func (mp *metaProcessor) ProcessBlock( header, mp.marshalizer, mp.appStatusHandler, - mp.dataPool.ShardHeaders().Len(), + mp.dataPool.Headers().Len(), mp.headersCounter.getNumShardMBHeadersTotalProcessed(), ) @@ -224,7 +222,19 @@ func (mp *metaProcessor) ProcessBlock( go mp.checkAndRequestIfShardHeadersMissing(header.Round) }() - highestNonceHdrs, err := mp.checkShardHeadersValidity() + mp.epochStartTrigger.Update(header.GetRound()) + + err = mp.checkEpochCorrectness(header, chainHandler) + if err != nil { + return err + } + + err = mp.verifyEpochStartDataForMetablock(header) + if err != nil { + return err + } + + highestNonceHdrs, err := mp.checkShardHeadersValidity(header) if err != nil { return err } @@ -255,7 +265,7 @@ func (mp *metaProcessor) ProcessBlock( return err } - err = mp.txCoordinator.VerifyCreatedBlockTransactions(body) + err = mp.txCoordinator.VerifyCreatedBlockTransactions(header, body) if err != nil { return err } @@ -281,6 +291,10 @@ func (mp *metaProcessor) ProcessBlock( } if !bytes.Equal(validatorStatsRH, header.GetValidatorStatsRootHash()) { + log.Debug("validator stats root hash mismatch", + "computed", validatorStatsRH, + "received", header.GetValidatorStatsRootHash(), + ) err = process.ErrValidatorStatsRootHashDoesNotMatch return err } @@ -293,6 +307,32 @@ func (mp *metaProcessor) SetNumProcessedObj(numObj uint64) { mp.headersCounter.shardMBHeadersTotalProcessed = numObj } +func (mp *metaProcessor) checkEpochCorrectness( + headerHandler data.HeaderHandler, + chainHandler data.ChainHandler, +) error { + currentBlockHeader := chainHandler.GetCurrentBlockHeader() + if currentBlockHeader == nil { + return nil + } + + isEpochIncorrect := headerHandler.GetEpoch() != 
currentBlockHeader.GetEpoch() && + mp.epochStartTrigger.Epoch() == currentBlockHeader.GetEpoch() + if isEpochIncorrect { + return process.ErrEpochDoesNotMatch + } + + isEpochIncorrect = mp.epochStartTrigger.IsEpochStart() && + mp.epochStartTrigger.EpochStartRound() <= headerHandler.GetRound() && + headerHandler.GetEpoch() != currentBlockHeader.GetEpoch()+1 + + if isEpochIncorrect { + return process.ErrEpochDoesNotMatch + } + + return nil +} + func (mp *metaProcessor) verifyCrossShardMiniBlockDstMe(header *block.MetaBlock) error { miniBlockShardsHashes, err := mp.getAllMiniBlockDstMeFromShards(header) if err != nil { @@ -330,7 +370,7 @@ func (mp *metaProcessor) getAllMiniBlockDstMeFromShards(metaHdr *block.MetaBlock continue } - lastHdr, err := mp.getLastNotarizedHdr(shardInfo.ShardID) + lastCrossNotarizedHeader, _, err := mp.blockTracker.GetLastCrossNotarizedHeader(shardInfo.ShardID) if err != nil { return nil, err } @@ -338,10 +378,10 @@ func (mp *metaProcessor) getAllMiniBlockDstMeFromShards(metaHdr *block.MetaBlock if shardHeader.GetRound() > metaHdr.Round { continue } - if shardHeader.GetRound() <= lastHdr.GetRound() { + if shardHeader.GetRound() <= lastCrossNotarizedHeader.GetRound() { continue } - if shardHeader.GetNonce() <= lastHdr.GetNonce() { + if shardHeader.GetNonce() <= lastCrossNotarizedHeader.GetNonce() { continue } @@ -354,28 +394,13 @@ func (mp *metaProcessor) getAllMiniBlockDstMeFromShards(metaHdr *block.MetaBlock return miniBlockShardsHashes, nil } -// SetConsensusData - sets the reward addresses for the current consensus group -func (mp *metaProcessor) SetConsensusData(_ []byte, _ uint64, _ uint32, _ uint32) { - // nothing to do -} - func (mp *metaProcessor) checkAndRequestIfShardHeadersMissing(round uint64) { - _, _, sortedHdrPerShard, err := mp.getOrderedHdrs(round) - if err != nil { - log.Trace("getOrderedHdrs", "error", err.Error()) - return - } + orderedHdrsPerShard := mp.blockTracker.GetTrackedHeadersForAllShards() for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { - // map from *block.Header to dataHandler - sortedHdrs := make([]data.HeaderHandler, len(sortedHdrPerShard[i])) - for j := 0; j < len(sortedHdrPerShard[i]); j++ { - sortedHdrs[j] = sortedHdrPerShard[i][j] - } - - err := mp.requestHeadersIfMissing(sortedHdrs, i, round, mp.dataPool.ShardHeaders()) + err := mp.requestHeadersIfMissing(orderedHdrsPerShard[i], i, round, mp.dataPool.Headers().MaxSize()) if err != nil { - log.Trace("requestHeadersIfMissing", "error", err.Error()) + log.Trace("checkAndRequestIfShardHeadersMissing", "error", err.Error()) continue } } @@ -421,33 +446,16 @@ func (mp *metaProcessor) removeBlockInfoFromPool(header *block.MetaBlock) error return process.ErrNilMetaBlockHeader } - headerPool := mp.dataPool.ShardHeaders() + headerPool := mp.dataPool.Headers() if headerPool == nil || headerPool.IsInterfaceNil() { return process.ErrNilHeadersDataPool } - headerNoncesPool := mp.dataPool.HeadersNonces() - if headerNoncesPool == nil || headerNoncesPool.IsInterfaceNil() { - return process.ErrNilHeadersNoncesDataPool - } - mp.hdrsForCurrBlock.mutHdrsForBlock.RLock() for i := 0; i < len(header.ShardInfo); i++ { shardHeaderHash := header.ShardInfo[i].HeaderHash - headerInfo, ok := mp.hdrsForCurrBlock.hdrHashAndInfo[string(shardHeaderHash)] - if !ok { - mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - return process.ErrMissingHeader - } - - shardBlock, ok := headerInfo.hdr.(*block.Header) - if !ok { - mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - return 
process.ErrWrongTypeAssertion - } - headerPool.Remove(shardHeaderHash) - headerNoncesPool.Remove(shardBlock.Nonce, shardBlock.ShardId) + headerPool.RemoveHeaderByHash(shardHeaderHash) } mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() @@ -473,21 +481,25 @@ func (mp *metaProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler, return process.ErrWrongTypeAssertion } - headerPool := mp.dataPool.ShardHeaders() + headerPool := mp.dataPool.Headers() if check.IfNil(headerPool) { return process.ErrNilHeadersDataPool } - headerNoncesPool := mp.dataPool.HeadersNonces() - if check.IfNil(headerNoncesPool) { - return process.ErrNilHeadersNoncesDataPool - } - hdrHashes := make([][]byte, len(metaBlock.ShardInfo)) for i := 0; i < len(metaBlock.ShardInfo); i++ { hdrHashes[i] = metaBlock.ShardInfo[i].HeaderHash } + err := mp.pendingMiniBlocks.RevertHeader(metaBlock) + if err != nil { + return err + } + + if metaBlock.IsStartOfEpochBlock() { + mp.epochStartTrigger.Revert() + } + for _, hdrHash := range hdrHashes { shardHeader, errNotCritical := process.GetShardHeaderFromStorage(hdrHash, mp.marshalizer, mp.store) if errNotCritical != nil { @@ -497,10 +509,7 @@ func (mp *metaProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler, continue } - headerPool.Put(hdrHash, shardHeader) - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(shardHeader.GetShardID(), hdrHash) - headerNoncesPool.Merge(shardHeader.GetNonce(), syncMap) + headerPool.AddHeader(hdrHash, shardHeader) hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardHeader.GetShardID()) storer := mp.store.GetStorer(hdrNonceHashDataUnit) @@ -518,7 +527,7 @@ func (mp *metaProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler, log.Debug("RestoreBlockDataFromStorage", "error", errNotCritical.Error()) } - mp.removeLastNotarized() + mp.blockTracker.RemoveLastNotarizedHeaders() return nil } @@ -530,9 +539,13 @@ func (mp *metaProcessor) CreateBlockBody(initialHdrData data.HeaderHandler, have ) mp.createBlockStarted() mp.blockSizeThrottler.ComputeMaxItems() + + mp.epochStartTrigger.Update(initialHdrData.GetRound()) + initialHdrData.SetEpoch(mp.epochStartTrigger.Epoch()) + mp.blockChainHook.SetCurrentHeader(initialHdrData) - miniBlocks, err := mp.createMiniBlocks(mp.blockSizeThrottler.MaxItemsToAdd(), initialHdrData.GetRound(), haveTime) + miniBlocks, err := mp.createMiniBlocks(mp.blockSizeThrottler.MaxItemsToAdd(), haveTime) if err != nil { return nil, err } @@ -547,11 +560,13 @@ func (mp *metaProcessor) CreateBlockBody(initialHdrData data.HeaderHandler, have func (mp *metaProcessor) createMiniBlocks( maxItemsInBlock uint32, - round uint64, haveTime func() bool, ) (block.Body, error) { miniBlocks := make(block.Body, 0) + if mp.epochStartTrigger.IsEpochStart() { + return miniBlocks, nil + } if mp.accounts.JournalLen() != 0 { return nil, process.ErrAccountStateDirty @@ -567,7 +582,7 @@ func (mp *metaProcessor) createMiniBlocks( return nil, process.ErrNilTransactionPool } - destMeMiniBlocks, nbTxs, nbHdrs, err := mp.createAndProcessCrossMiniBlocksDstMe(maxItemsInBlock, round, haveTime) + destMeMiniBlocks, nbTxs, nbHdrs, err := mp.createAndProcessCrossMiniBlocksDstMe(maxItemsInBlock, haveTime) if err != nil { log.Debug("createAndProcessCrossMiniBlocksDstMe", "error", err.Error()) } @@ -606,16 +621,18 @@ func (mp *metaProcessor) createMiniBlocks( // full verification through metachain header func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( maxItemsInBlock uint32, - round uint64, 
haveTime func() bool, ) (block.MiniBlockSlice, uint32, uint32, error) { miniBlocks := make(block.MiniBlockSlice, 0) txsAdded := uint32(0) hdrsAdded := uint32(0) - lastPushedHdr := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) - orderedHdrs, orderedHdrHashes, sortedHdrPerShard, err := mp.getOrderedHdrs(round) + sw := core.NewStopWatch() + sw.Start("ComputeLongestShardsChainsFromLastNotarized") + orderedHdrs, orderedHdrsHashes, _, err := mp.blockTracker.ComputeLongestShardsChainsFromLastNotarized() + sw.Stop("ComputeLongestShardsChainsFromLastNotarized") + log.Debug("measurements ComputeLongestShardsChainsFromLastNotarized", sw.GetMeasurements()...) if err != nil { return nil, 0, 0, err } @@ -624,16 +641,12 @@ func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( "num shard headers", len(orderedHdrs), ) - // save last committed header for verification - mp.mutNotarizedHdrs.RLock() - if mp.notarizedHdrs == nil { - mp.mutNotarizedHdrs.RUnlock() - return nil, 0, 0, process.ErrNotarizedHdrsSliceIsNil - } - for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { - lastPushedHdr[shardId] = mp.lastNotarizedHdrForShard(shardId) + lastShardHdr, err := mp.blockTracker.GetLastCrossNotarizedHeadersForAllShards() + if err != nil { + return nil, 0, 0, err } - mp.mutNotarizedHdrs.RUnlock() + + hdrsAddedForShard := make(map[uint32]uint32) mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() for i := 0; i < len(orderedHdrs); i++ { @@ -659,21 +672,20 @@ func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( break } - hdr := orderedHdrs[i] - lastHdr, ok := lastPushedHdr[hdr.ShardId].(*block.Header) - if !ok { + currShardHdr := orderedHdrs[i] + if currShardHdr.GetNonce() > lastShardHdr[currShardHdr.GetShardID()].GetNonce()+1 { + log.Debug("skip searching", + "shard", currShardHdr.GetShardID(), + "last shard hdr nonce", lastShardHdr[currShardHdr.GetShardID()].GetNonce(), + "curr shard hdr nonce", currShardHdr.GetNonce()) continue } - isFinal, _ := mp.isShardHeaderValidFinal(hdr, lastHdr, sortedHdrPerShard[hdr.ShardId]) - if !isFinal { - continue - } - - if len(hdr.GetMiniBlockHeadersWithDst(mp.shardCoordinator.SelfId())) == 0 { - mp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedHdrHashes[i])] = &hdrInfo{hdr: hdr, usedInBlock: true} + if len(currShardHdr.GetMiniBlockHeadersWithDst(mp.shardCoordinator.SelfId())) == 0 { + mp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedHdrsHashes[i])] = &hdrInfo{hdr: currShardHdr, usedInBlock: true} hdrsAdded++ - lastPushedHdr[hdr.ShardId] = hdr + hdrsAddedForShard[currShardHdr.GetShardID()]++ + lastShardHdr[currShardHdr.GetShardID()] = currShardHdr continue } @@ -691,13 +703,18 @@ func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( if maxTxSpaceRemained > 0 && maxMbSpaceRemained > 0 { snapshot := mp.accounts.JournalLen() currMBProcessed, currTxsAdded, hdrProcessFinished := mp.txCoordinator.CreateMbsAndProcessCrossShardTransactionsDstMe( - hdr, + currShardHdr, nil, uint32(maxTxSpaceRemained), uint32(maxMbSpaceRemained), haveTime) if !hdrProcessFinished { + log.Debug("shard header cannot be fully processed", + "round", currShardHdr.GetRound(), + "nonce", currShardHdr.GetNonce(), + "hash", orderedHdrsHashes[i]) + // shard header must be processed completely errAccountState := mp.accounts.RevertToSnapshot(snapshot) if errAccountState != nil { @@ -711,17 +728,40 @@ func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( miniBlocks = append(miniBlocks, currMBProcessed...) 
txsAdded = txsAdded + currTxsAdded - mp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedHdrHashes[i])] = &hdrInfo{hdr: hdr, usedInBlock: true} + mp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedHdrsHashes[i])] = &hdrInfo{hdr: currShardHdr, usedInBlock: true} hdrsAdded++ + hdrsAddedForShard[currShardHdr.GetShardID()]++ - lastPushedHdr[hdr.ShardId] = hdr + lastShardHdr[currShardHdr.GetShardID()] = currShardHdr } } mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() + mp.requestShardHeadersIfNeeded(hdrsAddedForShard, lastShardHdr) + return miniBlocks, txsAdded, hdrsAdded, nil } +func (mp *metaProcessor) requestShardHeadersIfNeeded( + hdrsAddedForShard map[uint32]uint32, + lastShardHdr map[uint32]data.HeaderHandler, +) { + for shardID := uint32(0); shardID < mp.shardCoordinator.NumberOfShards(); shardID++ { + log.Debug("shard hdrs added", + "shard", shardID, + "nb", hdrsAddedForShard[shardID], + "lastShardHdr", lastShardHdr[shardID].GetNonce()) + + if hdrsAddedForShard[shardID] == 0 { + fromNonce := lastShardHdr[shardID].GetNonce() + 1 + toNonce := fromNonce + uint64(mp.shardBlockFinality) + for nonce := fromNonce; nonce <= toNonce; nonce++ { + go mp.requestHandler.RequestShardHeaderByNonce(shardID, nonce) + } + } + } +} + func (mp *metaProcessor) processBlockHeaders(header *block.MetaBlock, round uint64, haveTime func() time.Duration) error { arguments := make([]interface{}, 0, len(header.ShardInfo)) for i := 0; i < len(header.ShardInfo); i++ { @@ -807,20 +847,13 @@ func (mp *metaProcessor) CommitBlock( log.Trace("MetaBlockUnit store.Put", "error", errNotCritical.Error()) } - headersNoncesPool := mp.dataPool.HeadersNonces() - if headersNoncesPool == nil { - err = process.ErrNilHeadersNoncesDataPool - return err - } - - metaBlocksPool := mp.dataPool.MetaBlocks() + metaBlocksPool := mp.dataPool.Headers() if metaBlocksPool == nil { err = process.ErrNilMetaBlocksPool return err } - headersNoncesPool.Remove(header.GetNonce(), header.GetShardID()) - metaBlocksPool.Remove(headerHash) + metaBlocksPool.RemoveHeaderByHash(headerHash) body, ok := bodyHandler.(block.Body) if !ok { @@ -885,19 +918,29 @@ func (mp *metaProcessor) CommitBlock( mp.saveMetricCrossCheckBlockHeight() + err = mp.commitAll() + if err != nil { + return err + } + + mp.commitEpochStart(header, chainHandler) + + mp.cleanupBlockTrackerPools(headerHandler) + err = mp.saveLastNotarizedHeader(header) if err != nil { return err } - err = mp.commitAll() + err = mp.pendingMiniBlocks.AddProcessedHeader(header) if err != nil { return err } log.Info("meta block has been committed successfully", - "nonce", header.Nonce, + "epoch", header.Epoch, "round", header.Round, + "nonce", header.Nonce, "hash", headerHash) errNotCritical = mp.removeBlockInfoFromPool(header) @@ -910,18 +953,22 @@ func (mp *metaProcessor) CommitBlock( log.Debug(errNotCritical.Error()) } - errNotCritical = mp.forkDetector.AddHeader(header, headerHash, process.BHProcessed, nil, nil, false) + errNotCritical = mp.forkDetector.AddHeader(header, headerHash, process.BHProcessed, nil, nil) if errNotCritical != nil { log.Debug("forkDetector.AddHeader", "error", errNotCritical.Error()) } + mp.blockTracker.AddSelfNotarizedHeader(mp.shardCoordinator.SelfId(), chainHandler.GetCurrentBlockHeader(), chainHandler.GetCurrentBlockHeaderHash()) + + for shardID := uint32(0); shardID < mp.shardCoordinator.NumberOfShards(); shardID++ { + lastSelfNotarizedHeader, lastSelfNotarizedHeaderHash := mp.getLastSelfNotarizedHeaderForShard(shardID) + mp.blockTracker.AddSelfNotarizedHeader(shardID, 
lastSelfNotarizedHeader, lastSelfNotarizedHeaderHash) + } + log.Debug("highest final meta block", "nonce", mp.forkDetector.GetHighestFinalBlockNonce(), ) - hdrsToAttestPreviousFinal := mp.shardBlockFinality + 1 - mp.removeNotarizedHdrsBehindPreviousFinal(hdrsToAttestPreviousFinal) - lastMetaBlock := chainHandler.GetCurrentBlockHeader() err = chainHandler.SetCurrentBlockBody(body) @@ -948,7 +995,8 @@ func (mp *metaProcessor) CommitBlock( header, body, headerHash, - mp.dataPool.ShardHeaders().Len(), + mp.dataPool.Headers().Len(), + mp.blockTracker, ) headerInfo := bootstrapStorage.BootstrapHeaderInfo{ @@ -956,40 +1004,51 @@ func (mp *metaProcessor) CommitBlock( Nonce: header.GetNonce(), Hash: headerHash, } - mp.prepareDataForBootStorer(headerInfo, header.Round, nil, nil, nil) + mp.prepareDataForBootStorer(headerInfo, header.Round, nil, nil, mp.forkDetector.GetHighestFinalBlockNonce(), nil) mp.blockSizeThrottler.Succeed(header.Round) log.Debug("pools info", - "metablocks", mp.dataPool.MetaBlocks().Len(), - "metablocks capacity", mp.dataPool.MetaBlocks().MaxSize(), - "shard headers", mp.dataPool.ShardHeaders().Len(), - "shard headers capacity", mp.dataPool.ShardHeaders().MaxSize(), + "headers pool", mp.dataPool.Headers().Len(), + "headers pool capacity", mp.dataPool.Headers().MaxSize(), ) - go mp.cleanupPools(headersNoncesPool, metaBlocksPool, mp.dataPool.ShardHeaders()) + go mp.cleanupPools(headerHandler, mp.dataPool.Headers()) return nil } +func (mp *metaProcessor) getLastSelfNotarizedHeaderForShard(shardID uint32) (data.HeaderHandler, []byte) { + //TODO: Implement mechanism to extract last meta header notarized by the given shard if this info will be needed later + return nil, nil +} + // ApplyProcessedMiniBlocks will do nothing on meta processor -func (mp *metaProcessor) ApplyProcessedMiniBlocks(_ map[string]map[string]struct{}) { +func (mp *metaProcessor) ApplyProcessedMiniBlocks(_ *processedMb.ProcessedMiniBlockTracker) { } -func (mp *metaProcessor) getPrevHeader(header *block.MetaBlock) (*block.MetaBlock, error) { - metaBlockStore := mp.store.GetStorer(dataRetriever.MetaBlockUnit) - buff, err := metaBlockStore.Get(header.GetPrevHash()) - if err != nil { - return nil, err +func (mp *metaProcessor) commitEpochStart(header data.HeaderHandler, chainHandler data.ChainHandler) { + if header.IsStartOfEpochBlock() { + mp.epochStartTrigger.SetProcessed(header) + } else { + currentHeader := chainHandler.GetCurrentBlockHeader() + if currentHeader != nil && currentHeader.IsStartOfEpochBlock() { + mp.epochStartTrigger.SetFinalityAttestingRound(header.GetRound()) + } } +} - prevMetaHeader := &block.MetaBlock{} - err = mp.marshalizer.Unmarshal(prevMetaHeader, buff) +// RevertAccountState reverts the account state for cleanup failed process +func (mp *metaProcessor) RevertAccountState() { + err := mp.accounts.RevertToSnapshot(0) if err != nil { - return nil, err + log.Debug("RevertToSnapshot", "error", err.Error()) } - return prevMetaHeader, nil + err = mp.validatorStatisticsProcessor.RevertPeerStateToSnapshot(0) + if err != nil { + log.Debug("RevertPeerStateToSnapshot", "error", err.Error()) + } } func (mp *metaProcessor) updateShardHeadersNonce(key uint32, value uint64) { @@ -1030,16 +1089,14 @@ func (mp *metaProcessor) saveMetricCrossCheckBlockHeight() { } func (mp *metaProcessor) saveLastNotarizedHeader(header *block.MetaBlock) error { - mp.mutNotarizedHdrs.Lock() - defer mp.mutNotarizedHdrs.Unlock() - - if mp.notarizedHdrs == nil { - return process.ErrNotarizedHdrsSliceIsNil - } + 
lastCrossNotarizedHeaderForShard := make(map[uint32]*hashAndHdr, mp.shardCoordinator.NumberOfShards()) + for shardID := uint32(0); shardID < mp.shardCoordinator.NumberOfShards(); shardID++ { + lastCrossNotarizedHeader, lastCrossNotarizedHeaderHash, err := mp.blockTracker.GetLastCrossNotarizedHeader(shardID) + if err != nil { + return err + } - tmpLastNotarizedHdrForShard := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) - for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { - tmpLastNotarizedHdrForShard[i] = mp.lastNotarizedHdrForShard(i) + lastCrossNotarizedHeaderForShard[shardID] = &hashAndHdr{hdr: lastCrossNotarizedHeader, hash: lastCrossNotarizedHeaderHash} } mp.hdrsForCurrBlock.mutHdrsForBlock.RLock() @@ -1051,21 +1108,23 @@ func (mp *metaProcessor) saveLastNotarizedHeader(header *block.MetaBlock) error return process.ErrMissingHeader } - shardHdr, ok := headerInfo.hdr.(*block.Header) + shardHeader, ok := headerInfo.hdr.(*block.Header) if !ok { mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() return process.ErrWrongTypeAssertion } - if tmpLastNotarizedHdrForShard[shardHdr.ShardId].GetNonce() < shardHdr.Nonce { - tmpLastNotarizedHdrForShard[shardHdr.ShardId] = shardHdr + if lastCrossNotarizedHeaderForShard[shardHeader.ShardId].hdr.GetNonce() < shardHeader.Nonce { + lastCrossNotarizedHeaderForShard[shardHeader.ShardId] = &hashAndHdr{hdr: shardHeader, hash: shardHeaderHash} } } mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { - mp.notarizedHdrs[i] = append(mp.notarizedHdrs[i], tmpLastNotarizedHdrForShard[i]) - DisplayLastNotarized(mp.marshalizer, mp.hasher, tmpLastNotarizedHdrForShard[i], i) + for shardID := uint32(0); shardID < mp.shardCoordinator.NumberOfShards(); shardID++ { + header := lastCrossNotarizedHeaderForShard[shardID].hdr + hash := lastCrossNotarizedHeaderForShard[shardID].hash + mp.blockTracker.AddCrossNotarizedHeader(shardID, header, hash) + DisplayLastNotarized(mp.marshalizer, mp.hasher, header, shardID) } return nil @@ -1073,18 +1132,16 @@ func (mp *metaProcessor) saveLastNotarizedHeader(header *block.MetaBlock) error // check if shard headers were signed and constructed correctly and returns headers which has to be // checked for finality -func (mp *metaProcessor) checkShardHeadersValidity() (map[uint32]data.HeaderHandler, error) { - mp.mutNotarizedHdrs.RLock() - if mp.notarizedHdrs == nil { - mp.mutNotarizedHdrs.RUnlock() - return nil, process.ErrNotarizedHdrsSliceIsNil - } +func (mp *metaProcessor) checkShardHeadersValidity(metaHdr *block.MetaBlock) (map[uint32]data.HeaderHandler, error) { + lastCrossNotarizedHeader := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) + for shardID := uint32(0); shardID < mp.shardCoordinator.NumberOfShards(); shardID++ { + lastCrossNotarizedHeaderForShard, _, err := mp.blockTracker.GetLastCrossNotarizedHeader(shardID) + if err != nil { + return nil, err + } - tmpLastNotarized := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) - for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { - tmpLastNotarized[i] = mp.lastNotarizedHdrForShard(i) + lastCrossNotarizedHeader[shardID] = lastCrossNotarizedHeaderForShard } - mp.mutNotarizedHdrs.RUnlock() usedShardHdrs := mp.sortHeadersForCurrentBlockByNonce(true) highestNonceHdrs := make(map[uint32]data.HeaderHandler, len(usedShardHdrs)) @@ -1093,15 +1150,41 @@ func (mp *metaProcessor) checkShardHeadersValidity() 
(map[uint32]data.HeaderHand return highestNonceHdrs, nil } - for shardId, hdrsForShard := range usedShardHdrs { + for shardID, hdrsForShard := range usedShardHdrs { for _, shardHdr := range hdrsForShard { - err := mp.isHdrConstructionValid(shardHdr, tmpLastNotarized[shardId]) + err := mp.headerValidator.IsHeaderConstructionValid(shardHdr, lastCrossNotarizedHeader[shardID]) if err != nil { - return nil, err + return nil, fmt.Errorf("%w : checkShardHeadersValidity -> isHdrConstructionValid", err) } - tmpLastNotarized[shardId] = shardHdr - highestNonceHdrs[shardId] = shardHdr + lastCrossNotarizedHeader[shardID] = shardHdr + highestNonceHdrs[shardID] = shardHdr + } + } + + mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + defer mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() + + for _, shardData := range metaHdr.ShardInfo { + actualHdr := mp.hdrsForCurrBlock.hdrHashAndInfo[string(shardData.HeaderHash)].hdr + shardHdr, ok := actualHdr.(*block.Header) + if !ok { + return nil, process.ErrWrongTypeAssertion + } + + if len(shardData.ShardMiniBlockHeaders) != len(shardHdr.MiniBlockHeaders) { + return nil, process.ErrHeaderShardDataMismatch + } + + mapMiniBlockHeadersInMetaBlock := make(map[string]struct{}) + for _, shardMiniBlockHdr := range shardData.ShardMiniBlockHeaders { + mapMiniBlockHeadersInMetaBlock[string(shardMiniBlockHdr.Hash)] = struct{}{} + } + + for _, actualMiniBlockHdr := range shardHdr.MiniBlockHeaders { + if _, ok := mapMiniBlockHeadersInMetaBlock[string(actualMiniBlockHdr.Hash)]; !ok { + return nil, process.ErrHeaderShardDataMismatch + } } } @@ -1131,10 +1214,10 @@ func (mp *metaProcessor) checkShardHeadersFinality(highestNonceHdrs map[uint32]d // found a header with the next nonce if shardHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { - err := mp.isHdrConstructionValid(shardHdr, lastVerifiedHdr) + err := mp.headerValidator.IsHeaderConstructionValid(shardHdr, lastVerifiedHdr) if err != nil { - go mp.removeHeaderFromPools(shardHdr, mp.dataPool.ShardHeaders(), mp.dataPool.HeadersNonces()) - log.Debug("isHdrConstructionValid", "error", err.Error()) + log.Debug("checkShardHeadersFinality -> isHdrConstructionValid", + "error", err.Error()) continue } @@ -1144,8 +1227,8 @@ func (mp *metaProcessor) checkShardHeadersFinality(highestNonceHdrs map[uint32]d } if nextBlocksVerified < mp.shardBlockFinality { - go mp.onRequestHeaderHandlerByNonce(lastVerifiedHdr.GetShardID(), lastVerifiedHdr.GetNonce()) - go mp.onRequestHeaderHandlerByNonce(lastVerifiedHdr.GetShardID(), lastVerifiedHdr.GetNonce()+1) + go mp.requestHandler.RequestShardHeaderByNonce(lastVerifiedHdr.GetShardID(), lastVerifiedHdr.GetNonce()) + go mp.requestHandler.RequestShardHeaderByNonce(lastVerifiedHdr.GetShardID(), lastVerifiedHdr.GetNonce()+1) errFinal = process.ErrHeaderNotFinal } } @@ -1153,71 +1236,20 @@ func (mp *metaProcessor) checkShardHeadersFinality(highestNonceHdrs map[uint32]d return errFinal } -func (mp *metaProcessor) isShardHeaderValidFinal(currHdr *block.Header, lastHdr *block.Header, sortedShardHdrs []*block.Header) (bool, []uint32) { - if currHdr == nil { - return false, nil - } - if sortedShardHdrs == nil { - return false, nil - } - if lastHdr == nil { - return false, nil - } - - err := mp.isHdrConstructionValid(currHdr, lastHdr) - if err != nil { - return false, nil - } - - // verify if there are "K" block after current to make this one final - lastVerifiedHdr := currHdr - nextBlocksVerified := uint32(0) - hdrIds := make([]uint32, 0) - for i := 0; i < len(sortedShardHdrs); i++ { - if nextBlocksVerified >= 
mp.shardBlockFinality { - return true, hdrIds - } - - // found a header with the next nonce - tmpHdr := sortedShardHdrs[i] - if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { - err := mp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) - if err != nil { - continue - } - - lastVerifiedHdr = tmpHdr - nextBlocksVerified += 1 - hdrIds = append(hdrIds, uint32(i)) - } - } - - if nextBlocksVerified >= mp.shardBlockFinality { - return true, hdrIds - } - - return false, nil -} - // receivedShardHeader is a call back function which is called when a new header // is added in the headers pool -func (mp *metaProcessor) receivedShardHeader(shardHeaderHash []byte) { - shardHeaderPool := mp.dataPool.ShardHeaders() - if shardHeaderPool == nil { +func (mp *metaProcessor) receivedShardHeader(headerHandler data.HeaderHandler, shardHeaderHash []byte) { + shardHeadersPool := mp.dataPool.Headers() + if shardHeadersPool == nil { return } - obj, ok := shardHeaderPool.Peek(shardHeaderHash) + shardHeader, ok := headerHandler.(*block.Header) if !ok { return } - shardHeader, ok := obj.(*block.Header) - if !ok { - return - } - - log.Debug("received shard block from network", + log.Trace("received shard header from network", "shard", shardHeader.ShardId, "round", shardHeader.Round, "nonce", shardHeader.Nonce, @@ -1258,15 +1290,19 @@ func (mp *metaProcessor) receivedShardHeader(shardHeaderHash []byte) { mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() } - mp.setLastHdrForShard(shardHeader.GetShardID(), shardHeader) + isShardHeaderWithOldEpochAndBadRound := shardHeader.Epoch < mp.epochStartTrigger.Epoch() && + shardHeader.Round > mp.epochStartTrigger.EpochFinalityAttestingRound()+process.EpochChangeGracePeriod && + mp.epochStartTrigger.EpochStartRound() < mp.epochStartTrigger.EpochFinalityAttestingRound() + if isShardHeaderWithOldEpochAndBadRound { + log.Debug("shard header with old epoch and bad round", + "shardEpoch", shardHeader.Epoch, + "metaEpoch", mp.epochStartTrigger.Epoch(), + "shardRound", shardHeader.Round, + "metaFinalityAttestingRound", mp.epochStartTrigger.EpochFinalityAttestingRound()) + } - if mp.isHeaderOutOfRange(shardHeader, shardHeaderPool) { - shardHeaderPool.Remove(shardHeaderHash) - - headersNoncesPool := mp.dataPool.HeadersNonces() - if headersNoncesPool != nil { - headersNoncesPool.Remove(shardHeader.GetNonce(), shardHeader.GetShardID()) - } + if mp.isHeaderOutOfRange(shardHeader, shardHeadersPool.MaxSize()) || isShardHeaderWithOldEpochAndBadRound { + shardHeadersPool.RemoveHeaderByHash(shardHeaderHash) return } @@ -1284,8 +1320,6 @@ func (mp *metaProcessor) requestMissingFinalityAttestingShardHeaders() uint32 { missingFinalityAttestingHeaders := mp.requestMissingFinalityAttestingHeaders( shardId, mp.shardBlockFinality, - mp.getShardHeaderFromPoolWithNonce, - mp.dataPool.ShardHeaders(), ) missingFinalityAttestingShardHeaders += missingFinalityAttestingHeaders @@ -1307,7 +1341,7 @@ func (mp *metaProcessor) requestShardHeaders(metaBlock *block.MetaBlock) (uint32 for shardId, shardHeaderHashes := range missingHeaderHashes { for _, hash := range shardHeaderHashes { mp.hdrsForCurrBlock.hdrHashAndInfo[string(hash)] = &hdrInfo{hdr: nil, usedInBlock: true} - go mp.onRequestHeaderHandler(shardId, hash) + go mp.requestHandler.RequestShardHeader(shardId, hash) } } @@ -1330,7 +1364,7 @@ func (mp *metaProcessor) computeMissingAndExistingShardHeaders(metaBlock *block. 
shardData := metaBlock.ShardInfo[i] hdr, err := process.GetShardHeaderFromPool( shardData.HeaderHash, - mp.dataPool.ShardHeaders()) + mp.dataPool.Headers()) if err != nil { missingHeadersHashes[shardData.ShardID] = append(missingHeadersHashes[shardData.ShardID], shardData.HeaderHash) @@ -1363,9 +1397,11 @@ func (mp *metaProcessor) createShardInfo( round uint64, ) ([]block.ShardData, error) { - shardInfo := make([]block.ShardData, 0, len(mp.hdrsForCurrBlock.hdrHashAndInfo)) + shardInfo := make([]block.ShardData, 0) + if mp.epochStartTrigger.IsEpochStart() { + return shardInfo, nil + } - log.Debug("creating shard info has been started") mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() for hdrHash, hdrInfo := range mp.hdrsForCurrBlock.hdrHashAndInfo { shardHdr, ok := hdrInfo.hdr.(*block.Header) @@ -1408,8 +1444,8 @@ func (mp *metaProcessor) createShardInfo( } mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - log.Debug("creating shard info has been finished", - "created shard data", len(shardInfo), + log.Debug("created shard data", + "size", len(shardInfo), ) return shardInfo, nil } @@ -1421,13 +1457,18 @@ func (mp *metaProcessor) createPeerInfo() ([]block.PeerData, error) { } // ApplyBodyToHeader creates a miniblock header list given a block body -func (mp *metaProcessor) ApplyBodyToHeader(hdr data.HeaderHandler, bodyHandler data.BodyHandler) error { - log.Debug("started creating block header", - "round", hdr.GetRound(), - ) +func (mp *metaProcessor) ApplyBodyToHeader(hdr data.HeaderHandler, bodyHandler data.BodyHandler) (data.BodyHandler, error) { + sw := core.NewStopWatch() + sw.Start("ApplyBodyToHeader") + defer func() { + sw.Stop("ApplyBodyToHeader") + + log.Debug("measurements ApplyBodyToHeader", sw.GetMeasurements()...) + }() + metaHdr, ok := hdr.(*block.MetaBlock) if !ok { - return process.ErrWrongTypeAssertion + return nil, process.ErrWrongTypeAssertion } var err error @@ -1441,182 +1482,259 @@ func (mp *metaProcessor) ApplyBodyToHeader(hdr data.HeaderHandler, bodyHandler d } }() + sw.Start("createShardInfo") shardInfo, err := mp.createShardInfo(hdr.GetRound()) + sw.Stop("createShardInfo") if err != nil { - return err + return nil, err } + sw.Start("createPeerInfo") peerInfo, err := mp.createPeerInfo() + sw.Stop("createPeerInfo") if err != nil { - return err + return nil, err } + metaHdr.Epoch = mp.epochStartTrigger.Epoch() metaHdr.ShardInfo = shardInfo metaHdr.PeerInfo = peerInfo metaHdr.RootHash = mp.getRootHash() metaHdr.TxCount = getTxCount(shardInfo) - if bodyHandler == nil || bodyHandler.IsInterfaceNil() { - return nil + if check.IfNil(bodyHandler) { + return nil, process.ErrNilBlockBody } body, ok := bodyHandler.(block.Body) if !ok { err = process.ErrWrongTypeAssertion - return err + return nil, err + } + + sw.Start("CreateReceiptsHash") + metaHdr.ReceiptsHash, err = mp.txCoordinator.CreateReceiptsHash() + sw.Stop("CreateReceiptsHash") + if err != nil { + return nil, err } totalTxCount, miniBlockHeaders, err := mp.createMiniBlockHeaders(body) if err != nil { - return err + return nil, err } metaHdr.MiniBlockHeaders = miniBlockHeaders metaHdr.TxCount += uint32(totalTxCount) - rootHash, err := mp.validatorStatisticsProcessor.UpdatePeerState(metaHdr) + sw.Start("UpdatePeerState") + metaHdr.ValidatorStatsRootHash, err = mp.validatorStatisticsProcessor.UpdatePeerState(metaHdr) + sw.Stop("UpdatePeerState") if err != nil { - return err + return nil, err } - metaHdr.ValidatorStatsRootHash = rootHash + sw.Start("createEpochStartForMetablock") + epochStart, err := 
mp.createEpochStartForMetablock() + sw.Stop("createEpochStartForMetablock") + if err != nil { + return nil, err + } + metaHdr.EpochStart = *epochStart - return nil + mp.blockSizeThrottler.Add( + metaHdr.GetRound(), + core.MaxUint32(metaHdr.ItemsInBody(), metaHdr.ItemsInHeader())) + + return body, nil } -func (mp *metaProcessor) waitForBlockHeaders(waitTime time.Duration) error { - select { - case <-mp.chRcvAllHdrs: +func (mp *metaProcessor) verifyEpochStartDataForMetablock(metaBlock *block.MetaBlock) error { + if !metaBlock.IsStartOfEpochBlock() { return nil - case <-time.After(waitTime): - return process.ErrTimeIsOut } -} -// CreateNewHeader creates a new header -func (mp *metaProcessor) CreateNewHeader() data.HeaderHandler { - return &block.MetaBlock{} -} + epochStart, err := mp.createEpochStartForMetablock() + if err != nil { + return err + } -// MarshalizedDataToBroadcast prepares underlying data into a marshalized object according to destination -func (mp *metaProcessor) MarshalizedDataToBroadcast( - _ data.HeaderHandler, - bodyHandler data.BodyHandler, -) (map[uint32][]byte, map[string][][]byte, error) { + receivedEpochStartHash, err := core.CalculateHash(mp.marshalizer, mp.hasher, metaBlock.EpochStart) + if err != nil { + return err + } - if bodyHandler == nil || bodyHandler.IsInterfaceNil() { - return nil, nil, process.ErrNilMiniBlocks + createdEpochStartHash, err := core.CalculateHash(mp.marshalizer, mp.hasher, *epochStart) + if err != nil { + return err } - body, ok := bodyHandler.(block.Body) - if !ok { - return nil, nil, process.ErrWrongTypeAssertion + if !bytes.Equal(receivedEpochStartHash, createdEpochStartHash) { + return process.ErrEpochStartDataDoesNotMatch } - bodies, mrsTxs := mp.txCoordinator.CreateMarshalizedData(body) - mrsData := make(map[uint32][]byte, len(bodies)) + return nil +} - for shardId, subsetBlockBody := range bodies { - buff, err := mp.marshalizer.Marshal(subsetBlockBody) - if err != nil { - log.Debug(process.ErrMarshalWithoutSuccess.Error()) - continue - } - mrsData[shardId] = buff +func (mp *metaProcessor) createEpochStartForMetablock() (*block.EpochStart, error) { + if !mp.epochStartTrigger.IsEpochStart() { + return &block.EpochStart{}, nil } - return mrsData, mrsTxs, nil -} + epochStart, lastNotarizedHeaders, err := mp.getLastNotarizedAndFinalizedHeaders() + if err != nil { + return nil, err + } -func (mp *metaProcessor) getOrderedHdrs(round uint64) ([]*block.Header, [][]byte, map[uint32][]*block.Header, error) { - shardBlocksPool := mp.dataPool.ShardHeaders() - if shardBlocksPool == nil { - return nil, nil, nil, process.ErrNilShardBlockPool + pendingMiniBlocks, err := mp.pendingMiniBlocks.PendingMiniBlockHeaders(lastNotarizedHeaders) + if err != nil { + return nil, err } - hashAndBlockMap := make(map[uint32][]*hashAndHdr) - headersMap := make(map[uint32][]*block.Header) - headers := make([]*block.Header, 0) - hdrHashes := make([][]byte, 0) + for _, pendingMiniBlock := range pendingMiniBlocks { + recvShId := pendingMiniBlock.ReceiverShardID - mp.mutNotarizedHdrs.RLock() - if mp.notarizedHdrs == nil { - mp.mutNotarizedHdrs.RUnlock() - return nil, nil, nil, process.ErrNotarizedHdrsSliceIsNil + epochStart.LastFinalizedHeaders[recvShId].PendingMiniBlockHeaders = + append(epochStart.LastFinalizedHeaders[recvShId].PendingMiniBlockHeaders, pendingMiniBlock) } - // get keys and arrange them into shards - for _, key := range shardBlocksPool.Keys() { - val, _ := shardBlocksPool.Peek(key) - if val == nil { - continue + return epochStart, nil +} + +func (mp 
*metaProcessor) getLastNotarizedAndFinalizedHeaders() (*block.EpochStart, []data.HeaderHandler, error) { + epochStart := &block.EpochStart{ + LastFinalizedHeaders: make([]block.EpochStartShardData, 0), + } + + lastNotarizedHeaders := make([]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) + for shardID := uint32(0); shardID < mp.shardCoordinator.NumberOfShards(); shardID++ { + lastCrossNotarizedHeaderForShard, _, err := mp.blockTracker.GetLastCrossNotarizedHeader(shardID) + if err != nil { + return nil, nil, err } - hdr, ok := val.(*block.Header) + shardHeader, ok := lastCrossNotarizedHeaderForShard.(*block.Header) if !ok { - continue + return nil, nil, process.ErrWrongTypeAssertion } - if hdr.GetRound() > round { - continue + hdrHash, err := core.CalculateHash(mp.marshalizer, mp.hasher, lastCrossNotarizedHeaderForShard) + if err != nil { + return nil, nil, err } - currShardId := hdr.ShardId - if mp.lastNotarizedHdrForShard(currShardId) == nil { - continue + lastMetaHash, lastFinalizedMetaHash, err := mp.getLastFinalizedMetaHashForShard(shardHeader) + if err != nil { + return nil, nil, err } - if hdr.GetRound() <= mp.lastNotarizedHdrForShard(currShardId).GetRound() { - continue + finalHeader := block.EpochStartShardData{ + ShardId: lastCrossNotarizedHeaderForShard.GetShardID(), + HeaderHash: hdrHash, + RootHash: lastCrossNotarizedHeaderForShard.GetRootHash(), + FirstPendingMetaBlock: lastMetaHash, + LastFinishedMetaBlock: lastFinalizedMetaHash, } - if hdr.GetNonce() <= mp.lastNotarizedHdrForShard(currShardId).GetNonce() { + epochStart.LastFinalizedHeaders = append(epochStart.LastFinalizedHeaders, finalHeader) + lastNotarizedHeaders[shardID] = lastCrossNotarizedHeaderForShard + } + + return epochStart, lastNotarizedHeaders, nil +} + +func (mp *metaProcessor) getLastFinalizedMetaHashForShard(shardHdr *block.Header) ([]byte, []byte, error) { + var lastMetaHash []byte + var lastFinalizedMetaHash []byte + + for currentHdr := shardHdr; currentHdr.GetNonce() > 0 && currentHdr.GetEpoch() == shardHdr.GetEpoch(); { + prevShardHdr, err := process.GetShardHeader(currentHdr.GetPrevHash(), mp.dataPool.Headers(), mp.marshalizer, mp.store) + if err != nil { + return nil, nil, err + } + + if len(currentHdr.MetaBlockHashes) == 0 { + currentHdr = prevShardHdr continue } - hashAndBlockMap[currShardId] = append(hashAndBlockMap[currShardId], - &hashAndHdr{hdr: hdr, hash: key}) - } - mp.mutNotarizedHdrs.RUnlock() + numAddedMetas := len(currentHdr.MetaBlockHashes) + if numAddedMetas > 1 { + if len(lastMetaHash) == 0 { + lastMetaHash = currentHdr.MetaBlockHashes[numAddedMetas-1] + lastFinalizedMetaHash = currentHdr.MetaBlockHashes[numAddedMetas-2] + return lastMetaHash, lastFinalizedMetaHash, nil + } - // sort headers for each shard - maxHdrLen := 0 - for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { - hdrsForShard := hashAndBlockMap[shardId] - if len(hdrsForShard) == 0 { - continue + if bytes.Equal(lastMetaHash, currentHdr.MetaBlockHashes[numAddedMetas-1]) { + lastFinalizedMetaHash = currentHdr.MetaBlockHashes[numAddedMetas-2] + return lastMetaHash, lastFinalizedMetaHash, nil + } + + lastFinalizedMetaHash = currentHdr.MetaBlockHashes[numAddedMetas-1] + return lastMetaHash, lastFinalizedMetaHash, nil } - sort.Slice(hdrsForShard, func(i, j int) bool { - return hdrsForShard[i].hdr.GetNonce() < hdrsForShard[j].hdr.GetNonce() - }) + if len(lastMetaHash) == 0 { + lastMetaHash = currentHdr.MetaBlockHashes[numAddedMetas-1] + currentHdr = prevShardHdr + continue + } - tmpHdrLen 
:= len(hdrsForShard) - if maxHdrLen < tmpHdrLen { - maxHdrLen = tmpHdrLen + lastFinalizedMetaHash = currentHdr.MetaBlockHashes[numAddedMetas-1] + if !bytes.Equal(lastMetaHash, lastFinalizedMetaHash) { + return lastMetaHash, lastFinalizedMetaHash, nil } + + currentHdr = prevShardHdr } - // copy from map to lists - equality between number of headers per shard - for i := 0; i < maxHdrLen; i++ { - for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { - hdrsForShard := hashAndBlockMap[shardId] - if i >= len(hdrsForShard) { - continue - } + //TODO: get header hash from last epoch start metablock + return nil, nil, nil +} - hdr, ok := hdrsForShard[i].hdr.(*block.Header) - if !ok { - continue - } +func (mp *metaProcessor) waitForBlockHeaders(waitTime time.Duration) error { + select { + case <-mp.chRcvAllHdrs: + return nil + case <-time.After(waitTime): + return process.ErrTimeIsOut + } +} + +// CreateNewHeader creates a new header +func (mp *metaProcessor) CreateNewHeader() data.HeaderHandler { + return &block.MetaBlock{} +} + +// MarshalizedDataToBroadcast prepares underlying data into a marshalized object according to destination +func (mp *metaProcessor) MarshalizedDataToBroadcast( + _ data.HeaderHandler, + bodyHandler data.BodyHandler, +) (map[uint32][]byte, map[string][][]byte, error) { + + if bodyHandler == nil || bodyHandler.IsInterfaceNil() { + return nil, nil, process.ErrNilMiniBlocks + } + + body, ok := bodyHandler.(block.Body) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } + + bodies, mrsTxs := mp.txCoordinator.CreateMarshalizedData(body) + mrsData := make(map[uint32][]byte, len(bodies)) - headers = append(headers, hdr) - hdrHashes = append(hdrHashes, hdrsForShard[i].hash) - headersMap[shardId] = append(headersMap[shardId], hdr) + for shardId, subsetBlockBody := range bodies { + buff, err := mp.marshalizer.Marshal(subsetBlockBody) + if err != nil { + log.Debug(process.ErrMarshalWithoutSuccess.Error()) + continue } + mrsData[shardId] = buff } - return headers, hdrHashes, headersMap, nil + return mrsData, mrsTxs, nil } func getTxCount(shardInfo []block.ShardData) uint32 { @@ -1666,26 +1784,10 @@ func (mp *metaProcessor) DecodeBlockHeader(dta []byte) data.HeaderHandler { // IsInterfaceNil returns true if there is no value under the interface func (mp *metaProcessor) IsInterfaceNil() bool { - if mp == nil { - return true - } - return false -} - -func (mp *metaProcessor) getShardHeaderFromPoolWithNonce( - nonce uint64, - shardId uint32, -) (data.HeaderHandler, []byte, error) { - - shardHeader, shardHeaderHash, err := process.GetShardHeaderFromPoolWithNonce( - nonce, - shardId, - mp.dataPool.ShardHeaders(), - mp.dataPool.HeadersNonces()) - - return shardHeader, shardHeaderHash, err + return mp == nil } +// GetBlockBodyFromPool returns block body from pool for a given header func (mp *metaProcessor) GetBlockBodyFromPool(headerHandler data.HeaderHandler) (data.BodyHandler, error) { miniBlockPool := mp.dataPool.MiniBlocks() if miniBlockPool == nil { diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index a9fb93c5339..bf21e834abf 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -11,7 +11,6 @@ import ( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/blockchain" "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" "github.com/ElrondNetwork/elrond-go/process" blproc 
"github.com/ElrondNetwork/elrond-go/process/block" "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" @@ -24,6 +23,13 @@ import ( func createMockMetaArguments() blproc.ArgMetaProcessor { mdp := initMetaDataPool() shardCoordinator := mock.NewOneShardCoordinatorMock() + argsHeaderValidator := blproc.ArgsHeaderValidator{ + Hasher: &mock.HasherStub{}, + Marshalizer: &mock.MarshalizerMock{}, + } + headerValidator, _ := blproc.NewHeaderValidator(argsHeaderValidator) + + startHeaders := createGenesisBlocks(shardCoordinator) arguments := blproc.ArgMetaProcessor{ ArgBaseProcessor: blproc.ArgBaseProcessor{ Accounts: &mock.AccountsStub{}, @@ -35,23 +41,26 @@ func createMockMetaArguments() blproc.ArgMetaProcessor { NodesCoordinator: mock.NewNodesCoordinatorMock(), SpecialAddressHandler: &mock.SpecialAddressHandlerMock{}, Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - StartHeaders: createGenesisBlocks(shardCoordinator), - RequestHandler: &mock.RequestHandlerMock{}, + RequestHandler: &mock.RequestHandlerStub{}, Core: &mock.ServiceContainerMock{}, BlockChainHook: &mock.BlockChainHookHandlerMock{}, TxCoordinator: &mock.TransactionCoordinatorMock{}, ValidatorStatisticsProcessor: &mock.ValidatorStatisticsProcessorMock{}, + EpochStartTrigger: &mock.EpochStartTriggerStub{}, + HeaderValidator: headerValidator, Rounder: &mock.RounderMock{}, BootStorer: &mock.BoostrapStorerMock{ PutCalled: func(round int64, bootData bootstrapStorage.BootstrapData) error { return nil }, }, + BlockTracker: mock.NewBlockTrackerMock(shardCoordinator, startHeaders), }, DataPool: mdp, SCDataGetter: &mock.ScQueryMock{}, SCToProtocol: &mock.SCToProtocolStub{}, PeerChangesHandler: &mock.PeerChangesHandler{}, + PendingMiniBlocks: &mock.PendingMiniBlocksHandlerStub{}, } return arguments } @@ -140,7 +149,9 @@ func setLastNotarizedHdr( round uint64, nonce uint64, randSeed []byte, - lastNotarizedHdrs map[uint32][]data.HeaderHandler) { + lastNotarizedHdrs map[uint32][]data.HeaderHandler, + blockTracker process.BlockTracker, +) { for i := uint32(0); i < noOfShards; i++ { lastHdr := &block.Header{Round: round, Nonce: nonce, @@ -152,6 +163,7 @@ func setLastNotarizedHdr( } else { lastNotarizedHdrs[i] = append(lastNotarizedHdrs[i], lastHdr) } + blockTracker.AddCrossNotarizedHeader(i, lastHdr, nil) } } @@ -256,6 +268,28 @@ func TestNewMetaProcessor_NilTxCoordinatorShouldErr(t *testing.T) { assert.Nil(t, be) } +func TestNewMetaProcessor_NilEpochStartShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockMetaArguments() + arguments.EpochStartTrigger = nil + + be, err := blproc.NewMetaProcessor(arguments) + assert.Equal(t, process.ErrNilEpochStartTrigger, err) + assert.Nil(t, be) +} + +func TestNewMetaProcessor_NilPendingMiniBlocksShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockMetaArguments() + arguments.PendingMiniBlocks = nil + + be, err := blproc.NewMetaProcessor(arguments) + assert.Equal(t, process.ErrNilPendingMiniBlocksHandler, err) + assert.Nil(t, be) +} + func TestNewMetaProcessor_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -485,9 +519,6 @@ func TestMetaProcessor_ProcessBlockHeaderShouldPass(t *testing.T) { func TestMetaProcessor_RequestFinalMissingHeaderShouldPass(t *testing.T) { t.Parallel() - hash := []byte("aaa") - ShardID := sharding.MetachainShardId - mdp := initMetaDataPool() accounts := &mock.AccountsStub{} accounts.RevertToSnapshotCalled = func(snapshot int) error { @@ -495,19 +526,8 @@ func TestMetaProcessor_RequestFinalMissingHeaderShouldPass(t *testing.T) { } 
arguments := createMockMetaArguments() arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(3) - arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) arguments.DataPool = mdp mp, _ := blproc.NewMetaProcessor(arguments) - mdp.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - cs := &mock.Uint64SyncMapCacherStub{} - cs.GetCalled = func(key uint64) (dataRetriever.ShardIdHashMap, bool) { - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(ShardID, hash) - - return syncMap, true - } - return cs - } mp.AddHdrHashToRequestedList(&block.Header{}, []byte("header_hash")) mp.SetHighestHdrNonceForCurrentBlock(0, 1) mp.SetHighestHdrNonceForCurrentBlock(1, 2) @@ -595,13 +615,21 @@ func TestMetaProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) { arguments.Accounts = accounts arguments.Store = store arguments.ForkDetector = &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error { + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { return nil }, GetHighestFinalBlockNonceCalled: func() uint64 { return 0 }, } + blockTrackerMock := &mock.BlockTrackerMock{ + GetCrossNotarizedHeaderCalled: func(shardID uint32, offset uint64) (data.HeaderHandler, []byte, error) { + return &block.MetaBlock{}, []byte("hash"), nil + }, + } + _ = blockTrackerMock.InitCrossNotarizedHeaders(createGenesisBlocks(arguments.ShardCoordinator)) + arguments.BlockTracker = blockTrackerMock + mp, _ := blproc.NewMetaProcessor(arguments) blkc, _ := blockchain.NewMetaChain( @@ -613,34 +641,6 @@ func TestMetaProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) { assert.Nil(t, err) } -func TestMetaProcessor_CommitBlockNilNoncesDataPoolShouldErr(t *testing.T) { - t.Parallel() - - mdp := initMetaDataPool() - accounts := &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - return nil - }, - } - hdr := createMetaBlockHeader() - body := block.Body{} - store := initStore() - - arguments := createMockMetaArguments() - arguments.Accounts = accounts - arguments.Store = store - arguments.DataPool = mdp - mp, _ := blproc.NewMetaProcessor(arguments) - - mdp.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - return nil - } - blkc := createTestBlockchain() - err := mp.CommitBlock(blkc, hdr, body) - - assert.Equal(t, process.ErrNilHeadersNoncesDataPool, err) -} - func TestMetaProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { t.Parallel() @@ -664,11 +664,8 @@ func TestMetaProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { arguments.Hasher = hasher mp, _ := blproc.NewMetaProcessor(arguments) - mdp.ShardHeadersCalled = func() storage.Cacher { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false - }, + mdp.HeadersCalled = func() dataRetriever.HeadersPool { + return &mock.HeadersCacherStub{ MaxSizeCalled: func() int { return 1000 }, @@ -697,7 +694,7 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { } forkDetectorAddCalled := false fd := &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error { + 
AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { if header == hdr { forkDetectorAddCalled = true return nil @@ -724,17 +721,24 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { arguments.ForkDetector = fd arguments.Store = store arguments.Hasher = hasher + blockTrackerMock := &mock.BlockTrackerMock{ + GetCrossNotarizedHeaderCalled: func(shardID uint32, offset uint64) (data.HeaderHandler, []byte, error) { + return &block.MetaBlock{}, []byte("hash"), nil + }, + } + _ = blockTrackerMock.InitCrossNotarizedHeaders(createGenesisBlocks(arguments.ShardCoordinator)) + arguments.BlockTracker = blockTrackerMock mp, _ := blproc.NewMetaProcessor(arguments) removeHdrWasCalled := false - mdp.ShardHeadersCalled = func() storage.Cacher { - cs := &mock.CacherStub{} - cs.RegisterHandlerCalled = func(i func(key []byte)) { + mdp.HeadersCalled = func() dataRetriever.HeadersPool { + cs := &mock.HeadersCacherStub{} + cs.RegisterHandlerCalled = func(i func(header data.HeaderHandler, key []byte)) { } - cs.PeekCalled = func(key []byte) (value interface{}, ok bool) { - return &block.Header{}, true + cs.GetHeaderByHashCalled = func(hash []byte) (handler data.HeaderHandler, e error) { + return &block.Header{}, nil } - cs.RemoveCalled = func(key []byte) { + cs.RemoveHeaderByHashCalled = func(key []byte) { if bytes.Equal(key, []byte("hdr_hash1")) { removeHdrWasCalled = true } @@ -745,7 +749,7 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { cs.MaxSizeCalled = func() int { return 1000 } - cs.KeysCalled = func() [][]byte { + cs.NoncesCalled = func(shardId uint32) []uint64 { return nil } return cs @@ -772,17 +776,12 @@ func TestBlockProc_RequestTransactionFromNetwork(t *testing.T) { arguments.Store = initStore() mp, _ := blproc.NewMetaProcessor(arguments) - mdp.ShardHeadersCalled = func() storage.Cacher { - cs := &mock.CacherStub{} - cs.RegisterHandlerCalled = func(i func(key []byte)) { - } - cs.PeekCalled = func(key []byte) (value interface{}, ok bool) { - return nil, false + mdp.HeadersCalled = func() dataRetriever.HeadersPool { + cs := &mock.HeadersCacherStub{} + cs.RegisterHandlerCalled = func(i func(header data.HeaderHandler, key []byte)) { } - cs.RemoveCalled = func(key []byte) { - } - cs.LenCalled = func() int { - return 0 + cs.GetHeaderByHashCalled = func(hash []byte) (handler data.HeaderHandler, e error) { + return nil, errors.New("err") } cs.MaxSizeCalled = func() int { return 1000 @@ -839,7 +838,29 @@ func TestMetaProcessor_ApplyBodyToHeaderShouldWork(t *testing.T) { mp, _ := blproc.NewMetaProcessor(arguments) hdr := &block.MetaBlock{} - err := mp.ApplyBodyToHeader(hdr, nil) + _, err := mp.ApplyBodyToHeader(hdr, block.Body{}) + assert.Nil(t, err) +} + +func TestMetaProcessor_ApplyBodyToHeaderShouldSetEpochStart(t *testing.T) { + t.Parallel() + + arguments := createMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + JournalLenCalled: func() int { + return 0 + }, + RootHashCalled: func() ([]byte, error) { + return []byte("root"), nil + }, + } + arguments.DataPool = initMetaDataPool() + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) + + metaBlk := &block.MetaBlock{TimeStamp: 12345} + bodyHandler := block.Body{&block.MiniBlock{Type: 0}} + _, err := mp.ApplyBodyToHeader(metaBlk, bodyHandler) assert.Nil(t, err) } @@ -902,7 +923,7 @@ func TestMetaProcessor_ReceivedHeaderShouldDecreaseMissing(t *testing.T) { 
mp.AddHdrHashToRequestedList(nil, hdrHash3) //received txHash2 - pool.ShardHeaders().Put(hdrHash2, hdr2) + pool.Headers().AddHeader(hdrHash2, hdr2) time.Sleep(100 * time.Millisecond) @@ -943,17 +964,17 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotValid(t *testing.T) miniBlockHeaders3 = append(miniBlockHeaders3, miniBlockHeader1) //put the existing headers inside datapool - pool.ShardHeaders().Put(hdrHash1, &block.Header{ + pool.Headers().AddHeader(hdrHash1, &block.Header{ Round: 1, Nonce: 45, ShardId: 0, MiniBlockHeaders: miniBlockHeaders1}) - pool.ShardHeaders().Put(hdrHash2, &block.Header{ + pool.Headers().AddHeader(hdrHash2, &block.Header{ Round: 2, Nonce: 45, ShardId: 1, MiniBlockHeaders: miniBlockHeaders2}) - pool.ShardHeaders().Put(hdrHash3, &block.Header{ + pool.Headers().AddHeader(hdrHash3, &block.Header{ Round: 3, Nonce: 45, ShardId: 2, @@ -972,7 +993,8 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotValid(t *testing.T) } arguments.DataPool = pool arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) - arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + startHeaders := createGenesisBlocks(arguments.ShardCoordinator) + arguments.BlockTracker = mock.NewBlockTrackerMock(arguments.ShardCoordinator, startHeaders) arguments.Store = initStore() mp, _ := blproc.NewMetaProcessor(arguments) @@ -1032,7 +1054,8 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotFinal(t *testing.T) } arguments.DataPool = pool arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) - arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + startHeaders := createGenesisBlocks(arguments.ShardCoordinator) + arguments.BlockTracker = mock.NewBlockTrackerMock(arguments.ShardCoordinator, startHeaders) arguments.Store = initStore() mp, _ := blproc.NewMetaProcessor(arguments) @@ -1040,35 +1063,41 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotFinal(t *testing.T) prevRandSeed := []byte("prevrand") notarizedHdrs := mp.NotarizedHdrs() - setLastNotarizedHdr(noOfShards, 9, 44, prevRandSeed, notarizedHdrs) + setLastNotarizedHdr(noOfShards, 9, 44, prevRandSeed, notarizedHdrs, arguments.BlockTracker) //put the existing headers inside datapool prevHash, _ := mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(0).(*block.Header)) - pool.ShardHeaders().Put(hdrHash1, &block.Header{ + hdr1 := &block.Header{ Round: 10, Nonce: 45, ShardId: 0, PrevRandSeed: prevRandSeed, PrevHash: prevHash, - MiniBlockHeaders: miniBlockHeaders1}) + MiniBlockHeaders: miniBlockHeaders1} + pool.Headers().AddHeader(hdrHash1, hdr1) + arguments.BlockTracker.AddTrackedHeader(hdr1, hdrHash1) prevHash, _ = mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(1).(*block.Header)) - pool.ShardHeaders().Put(hdrHash2, &block.Header{ + hdr2 := &block.Header{ Round: 20, Nonce: 45, ShardId: 1, PrevRandSeed: prevRandSeed, PrevHash: prevHash, - MiniBlockHeaders: miniBlockHeaders2}) + MiniBlockHeaders: miniBlockHeaders2} + pool.Headers().AddHeader(hdrHash2, hdr2) + arguments.BlockTracker.AddTrackedHeader(hdr2, hdrHash2) prevHash, _ = mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(2).(*block.Header)) - pool.ShardHeaders().Put(hdrHash3, &block.Header{ + hdr3 := &block.Header{ Round: 30, Nonce: 45, ShardId: 2, PrevRandSeed: prevRandSeed, PrevHash: prevHash, - MiniBlockHeaders: miniBlockHeaders3}) + MiniBlockHeaders: miniBlockHeaders3} + pool.Headers().AddHeader(hdrHash3, hdr3) + 
arguments.BlockTracker.AddTrackedHeader(hdr3, hdrHash3) mp.SetShardBlockFinality(0) round := uint64(40) @@ -1129,7 +1158,8 @@ func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { } arguments.DataPool = pool arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) - arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + startHeaders := createGenesisBlocks(arguments.ShardCoordinator) + arguments.BlockTracker = mock.NewBlockTrackerMock(arguments.ShardCoordinator, startHeaders) arguments.Store = initStore() mp, _ := blproc.NewMetaProcessor(arguments) @@ -1138,7 +1168,7 @@ func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") notarizedHdrs := mp.NotarizedHdrs() - setLastNotarizedHdr(noOfShards, 9, 44, prevRandSeed, notarizedHdrs) + setLastNotarizedHdr(noOfShards, 9, 44, prevRandSeed, notarizedHdrs, arguments.BlockTracker) headers := make([]*block.Header, 0) @@ -1165,8 +1195,9 @@ func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { PrevHash: prevHash, MiniBlockHeaders: miniBlockHeaders1}) - pool.ShardHeaders().Put(hdrHash1, headers[0]) - pool.ShardHeaders().Put(hdrHash11, headers[1]) + pool.Headers().AddHeader(hdrHash1, headers[0]) + pool.Headers().AddHeader(hdrHash11, headers[1]) + arguments.BlockTracker.AddTrackedHeader(headers[0], hdrHash1) // header shard 1 prevHash, _ = mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(1).(*block.Header)) @@ -1189,8 +1220,9 @@ func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { PrevHash: prevHash, MiniBlockHeaders: miniBlockHeaders2}) - pool.ShardHeaders().Put(hdrHash2, headers[2]) - pool.ShardHeaders().Put(hdrHash22, headers[3]) + pool.Headers().AddHeader(hdrHash2, headers[2]) + pool.Headers().AddHeader(hdrHash22, headers[3]) + arguments.BlockTracker.AddTrackedHeader(headers[2], hdrHash2) // header shard 2 prevHash, _ = mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(2).(*block.Header)) @@ -1213,8 +1245,9 @@ func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { PrevHash: prevHash, MiniBlockHeaders: miniBlockHeaders3}) - pool.ShardHeaders().Put(hdrHash3, headers[4]) - pool.ShardHeaders().Put(hdrHash33, headers[5]) + pool.Headers().AddHeader(hdrHash3, headers[4]) + pool.Headers().AddHeader(hdrHash33, headers[5]) + arguments.BlockTracker.AddTrackedHeader(headers[4], hdrHash3) mp.SetShardBlockFinality(1) round := uint64(15) @@ -1275,7 +1308,8 @@ func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { } arguments.DataPool = pool arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) - arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + startHeaders := createGenesisBlocks(arguments.ShardCoordinator) + arguments.BlockTracker = mock.NewBlockTrackerMock(arguments.ShardCoordinator, startHeaders) arguments.Store = initStore() mp, _ := blproc.NewMetaProcessor(arguments) @@ -1284,7 +1318,7 @@ func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") notarizedHdrs := mp.NotarizedHdrs() - setLastNotarizedHdr(noOfShards, 9, 44, prevRandSeed, notarizedHdrs) + setLastNotarizedHdr(noOfShards, 9, 44, prevRandSeed, notarizedHdrs, arguments.BlockTracker) headers := make([]*block.Header, 0) @@ -1311,8 +1345,9 @@ func 
TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { PrevHash: prevHash, MiniBlockHeaders: miniBlockHeaders1}) - pool.ShardHeaders().Put(hdrHash1, headers[0]) - pool.ShardHeaders().Put(hdrHash11, headers[1]) + pool.Headers().AddHeader(hdrHash1, headers[0]) + pool.Headers().AddHeader(hdrHash11, headers[1]) + arguments.BlockTracker.AddTrackedHeader(headers[0], hdrHash1) // header shard 1 prevHash, _ = mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(1).(*block.Header)) @@ -1335,8 +1370,9 @@ func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { PrevHash: prevHash, MiniBlockHeaders: miniBlockHeaders2}) - pool.ShardHeaders().Put(hdrHash2, headers[2]) - pool.ShardHeaders().Put(hdrHash22, headers[3]) + pool.Headers().AddHeader(hdrHash2, headers[2]) + pool.Headers().AddHeader(hdrHash22, headers[3]) + arguments.BlockTracker.AddTrackedHeader(headers[2], hdrHash2) // header shard 2 prevHash, _ = mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(2).(*block.Header)) @@ -1359,8 +1395,9 @@ func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { PrevHash: prevHash, MiniBlockHeaders: miniBlockHeaders3}) - pool.ShardHeaders().Put(hdrHash3, headers[4]) - pool.ShardHeaders().Put(hdrHash33, headers[5]) + pool.Headers().AddHeader(hdrHash3, headers[4]) + pool.Headers().AddHeader(hdrHash33, headers[5]) + arguments.BlockTracker.AddTrackedHeader(headers[4], hdrHash3) mp.SetShardBlockFinality(1) round := uint64(20) @@ -1419,7 +1456,7 @@ func TestMetaProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { err := mp.RestoreBlockIntoPools(mhdr, body) - hdrFromPool, _ := pool.ShardHeaders().Get(hdrHash) + hdrFromPool, _ := pool.Headers().GetHeaderByHash(hdrHash) assert.Nil(t, err) assert.Equal(t, &hdr, hdrFromPool) } @@ -1443,14 +1480,15 @@ func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { arguments.DataPool = pool arguments.Store = initStore() arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) - arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + startHeaders := createGenesisBlocks(arguments.ShardCoordinator) + arguments.BlockTracker = mock.NewBlockTrackerMock(arguments.ShardCoordinator, startHeaders) mp, _ := blproc.NewMetaProcessor(arguments) prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") notarizedHdrs := mp.NotarizedHdrs() firstNonce := uint64(44) - setLastNotarizedHdr(noOfShards, 9, firstNonce, prevRandSeed, notarizedHdrs) + setLastNotarizedHdr(noOfShards, 9, firstNonce, prevRandSeed, notarizedHdrs, arguments.BlockTracker) //put the existing headers inside datapool @@ -1491,8 +1529,8 @@ func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { assert.Equal(t, firstNonce, mp.LastNotarizedHdrForShard(currHdr.ShardId).GetNonce()) // wrong header type in pool and defer called - pool.ShardHeaders().Put(currHash, metaHdr) - pool.ShardHeaders().Put(prevHash, prevHdr) + pool.Headers().AddHeader(currHash, metaHdr) + pool.Headers().AddHeader(prevHash, prevHdr) mp.SetHdrForCurrentBlock(currHash, metaHdr, true) mp.SetHdrForCurrentBlock(prevHash, prevHdr, true) @@ -1502,8 +1540,8 @@ func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { assert.Equal(t, firstNonce, mp.LastNotarizedHdrForShard(currHdr.ShardId).GetNonce()) // put headers in pool - pool.ShardHeaders().Put(currHash, currHdr) - pool.ShardHeaders().Put(prevHash, prevHdr) + pool.Headers().AddHeader(currHash, currHdr) + pool.Headers().AddHeader(prevHash, prevHdr) 
mp.CreateBlockStarted() mp.SetHdrForCurrentBlock(currHash, currHdr, true) mp.SetHdrForCurrentBlock(prevHash, prevHdr, true) @@ -1533,13 +1571,21 @@ func TestMetaProcessor_CheckShardHeadersValidity(t *testing.T) { arguments.DataPool = pool arguments.Store = initStore() arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) - arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + startHeaders := createGenesisBlocks(arguments.ShardCoordinator) + arguments.BlockTracker = mock.NewBlockTrackerMock(arguments.ShardCoordinator, startHeaders) + + argsHeaderValidator := blproc.ArgsHeaderValidator{ + Hasher: arguments.Hasher, + Marshalizer: arguments.Marshalizer, + } + arguments.HeaderValidator, _ = blproc.NewHeaderValidator(argsHeaderValidator) + mp, _ := blproc.NewMetaProcessor(arguments) prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") notarizedHdrs := mp.NotarizedHdrs() - setLastNotarizedHdr(noOfShards, 9, 44, prevRandSeed, notarizedHdrs) + setLastNotarizedHdr(noOfShards, 9, 44, prevRandSeed, notarizedHdrs, arguments.BlockTracker) //put the existing headers inside datapool @@ -1564,9 +1610,9 @@ func TestMetaProcessor_CheckShardHeadersValidity(t *testing.T) { PrevHash: prevHash, RootHash: []byte("currRootHash")} currHash, _ := mp.ComputeHeaderHash(currHdr) - pool.ShardHeaders().Put(currHash, currHdr) + pool.Headers().AddHeader(currHash, currHdr) prevHash, _ = mp.ComputeHeaderHash(prevHdr) - pool.ShardHeaders().Put(prevHash, prevHdr) + pool.Headers().AddHeader(prevHash, prevHdr) wrongCurrHdr := &block.Header{ Round: 11, Nonce: 48, @@ -1576,7 +1622,7 @@ func TestMetaProcessor_CheckShardHeadersValidity(t *testing.T) { PrevHash: prevHash, RootHash: []byte("currRootHash")} wrongCurrHash, _ := mp.ComputeHeaderHash(wrongCurrHdr) - pool.ShardHeaders().Put(wrongCurrHash, wrongCurrHdr) + pool.Headers().AddHeader(wrongCurrHash, wrongCurrHdr) metaHdr := &block.MetaBlock{Round: 20} shDataCurr := block.ShardData{ShardID: 0, HeaderHash: wrongCurrHash} @@ -1588,8 +1634,8 @@ func TestMetaProcessor_CheckShardHeadersValidity(t *testing.T) { mp.SetHdrForCurrentBlock(wrongCurrHash, wrongCurrHdr, true) mp.SetHdrForCurrentBlock(prevHash, prevHdr, true) - _, err := mp.CheckShardHeadersValidity() - assert.Equal(t, process.ErrWrongNonceInBlock, err) + _, err := mp.CheckShardHeadersValidity(metaHdr) + assert.True(t, errors.Is(err, process.ErrWrongNonceInBlock)) shDataCurr = block.ShardData{ShardID: 0, HeaderHash: currHash} metaHdr.ShardInfo = make([]block.ShardData, 0) @@ -1601,7 +1647,7 @@ func TestMetaProcessor_CheckShardHeadersValidity(t *testing.T) { mp.SetHdrForCurrentBlock(currHash, currHdr, true) mp.SetHdrForCurrentBlock(prevHash, prevHdr, true) - highestNonceHdrs, err := mp.CheckShardHeadersValidity() + highestNonceHdrs, err := mp.CheckShardHeadersValidity(metaHdr) assert.Nil(t, err) assert.NotNil(t, highestNonceHdrs) assert.Equal(t, currHdr.Nonce, highestNonceHdrs[currHdr.ShardId].GetNonce()) @@ -1625,13 +1671,14 @@ func TestMetaProcessor_CheckShardHeadersValidityWrongNonceFromLastNoted(t *testi arguments.DataPool = pool arguments.Store = initStore() arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) - arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + startHeaders := createGenesisBlocks(arguments.ShardCoordinator) + arguments.BlockTracker = mock.NewBlockTrackerMock(arguments.ShardCoordinator, startHeaders) mp, _ := blproc.NewMetaProcessor(arguments) prevRandSeed := 
[]byte("prevrand") currRandSeed := []byte("currrand") notarizedHdrs := mp.NotarizedHdrs() - setLastNotarizedHdr(noOfShards, 9, 44, prevRandSeed, notarizedHdrs) + setLastNotarizedHdr(noOfShards, 9, 44, prevRandSeed, notarizedHdrs, arguments.BlockTracker) //put the existing headers inside datapool currHdr := &block.Header{ @@ -1643,7 +1690,7 @@ func TestMetaProcessor_CheckShardHeadersValidityWrongNonceFromLastNoted(t *testi PrevHash: []byte("prevhash"), RootHash: []byte("currRootHash")} currHash, _ := mp.ComputeHeaderHash(currHdr) - pool.ShardHeaders().Put(currHash, currHdr) + pool.Headers().AddHeader(currHash, currHdr) metaHdr := &block.MetaBlock{Round: 20} shDataCurr := block.ShardData{ShardID: 0, HeaderHash: currHash} @@ -1652,9 +1699,9 @@ func TestMetaProcessor_CheckShardHeadersValidityWrongNonceFromLastNoted(t *testi mp.SetHdrForCurrentBlock(currHash, currHdr, true) - highestNonceHdrs, err := mp.CheckShardHeadersValidity() + highestNonceHdrs, err := mp.CheckShardHeadersValidity(metaHdr) assert.Nil(t, highestNonceHdrs) - assert.Equal(t, process.ErrWrongNonceInBlock, err) + assert.True(t, errors.Is(err, process.ErrWrongNonceInBlock)) } func TestMetaProcessor_CheckShardHeadersValidityRoundZeroLastNoted(t *testing.T) { @@ -1676,22 +1723,24 @@ func TestMetaProcessor_CheckShardHeadersValidityRoundZeroLastNoted(t *testing.T) arguments.DataPool = pool arguments.Store = initStore() arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) - arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + startHeaders := createGenesisBlocks(arguments.ShardCoordinator) + arguments.BlockTracker = mock.NewBlockTrackerMock(arguments.ShardCoordinator, startHeaders) mp, _ := blproc.NewMetaProcessor(arguments) - prevRandSeed := []byte("prevrand") + prevRandSeed := startHeaders[0].GetRandSeed() currRandSeed := []byte("currrand") + prevHash, _ := mp.ComputeHeaderHash(startHeaders[0]) notarizedHdrs := mp.NotarizedHdrs() - setLastNotarizedHdr(noOfShards, 0, 0, prevRandSeed, notarizedHdrs) + setLastNotarizedHdr(noOfShards, 0, 0, prevRandSeed, notarizedHdrs, arguments.BlockTracker) //put the existing headers inside datapool currHdr := &block.Header{ - Round: 0, - Nonce: 0, + Round: 1, + Nonce: 1, ShardId: 0, - PrevRandSeed: currRandSeed, - RandSeed: []byte("nextrand"), - PrevHash: []byte("prevhash"), + PrevRandSeed: prevRandSeed, + RandSeed: currRandSeed, + PrevHash: prevHash, RootHash: []byte("currRootHash")} currHash, _ := mp.ComputeHeaderHash(currHdr) @@ -1701,12 +1750,13 @@ func TestMetaProcessor_CheckShardHeadersValidityRoundZeroLastNoted(t *testing.T) metaHdr.ShardInfo = make([]block.ShardData, 0) metaHdr.ShardInfo = append(metaHdr.ShardInfo, shDataCurr) - highestNonceHdrs, err := mp.CheckShardHeadersValidity() + highestNonceHdrs, err := mp.CheckShardHeadersValidity(metaHdr) assert.Equal(t, 0, len(highestNonceHdrs)) + assert.Nil(t, err) - pool.ShardHeaders().Put(currHash, currHdr) + pool.Headers().AddHeader(currHash, currHdr) mp.SetHdrForCurrentBlock(currHash, currHdr, true) - highestNonceHdrs, err = mp.CheckShardHeadersValidity() + highestNonceHdrs, err = mp.CheckShardHeadersValidity(metaHdr) assert.NotNil(t, highestNonceHdrs) assert.Nil(t, err) assert.Equal(t, currHdr.Nonce, highestNonceHdrs[currHdr.ShardId].GetNonce()) @@ -1730,13 +1780,14 @@ func TestMetaProcessor_CheckShardHeadersFinality(t *testing.T) { arguments.DataPool = pool arguments.Store = initStore() arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) - 
arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + startHeaders := createGenesisBlocks(arguments.ShardCoordinator) + arguments.BlockTracker = mock.NewBlockTrackerMock(arguments.ShardCoordinator, startHeaders) mp, _ := blproc.NewMetaProcessor(arguments) prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") notarizedHdrs := mp.NotarizedHdrs() - setLastNotarizedHdr(noOfShards, 9, 44, prevRandSeed, notarizedHdrs) + setLastNotarizedHdr(noOfShards, 9, 44, prevRandSeed, notarizedHdrs, arguments.BlockTracker) //put the existing headers inside datapool @@ -1770,7 +1821,7 @@ func TestMetaProcessor_CheckShardHeadersFinality(t *testing.T) { PrevHash: prevHash, RootHash: []byte("currRootHash")} prevHash, _ = mp.ComputeHeaderHash(nextWrongHdr) - pool.ShardHeaders().Put(prevHash, nextWrongHdr) + pool.Headers().AddHeader(prevHash, nextWrongHdr) mp.SetShardBlockFinality(0) metaHdr := &block.MetaBlock{Round: 1} @@ -1812,7 +1863,7 @@ func TestMetaProcessor_CheckShardHeadersFinality(t *testing.T) { RootHash: []byte("currRootHash")} nextHash, _ := mp.ComputeHeaderHash(nextHdr) - pool.ShardHeaders().Put(nextHash, nextHdr) + pool.Headers().AddHeader(nextHash, nextHdr) mp.SetHdrForCurrentBlock(nextHash, nextHdr, false) metaHdr.Round = 20 @@ -1838,13 +1889,14 @@ func TestMetaProcessor_IsHdrConstructionValid(t *testing.T) { arguments.DataPool = pool arguments.Store = initStore() arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) - arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + startHeaders := createGenesisBlocks(arguments.ShardCoordinator) + arguments.BlockTracker = mock.NewBlockTrackerMock(arguments.ShardCoordinator, startHeaders) mp, _ := blproc.NewMetaProcessor(arguments) prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") notarizedHdrs := mp.NotarizedHdrs() - setLastNotarizedHdr(noOfShards, 9, 44, prevRandSeed, notarizedHdrs) + setLastNotarizedHdr(noOfShards, 9, 44, prevRandSeed, notarizedHdrs, arguments.BlockTracker) //put the existing headers inside datapool @@ -1879,17 +1931,6 @@ func TestMetaProcessor_IsHdrConstructionValid(t *testing.T) { err = mp.IsHdrConstructionValid(currHdr, prevHdr) assert.Equal(t, err, process.ErrWrongNonceInBlock) - currHdr.Nonce = 0 - prevHdr.Nonce = 0 - err = mp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrRootStateDoesNotMatch) - - currHdr.Nonce = 0 - prevHdr.Nonce = 0 - prevHdr.RootHash = nil - err = mp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Nil(t, err) - currHdr.Nonce = 46 prevHdr.Nonce = 45 prevHdr.Round = currHdr.Round + 1 @@ -1918,119 +1959,6 @@ func TestMetaProcessor_IsHdrConstructionValid(t *testing.T) { assert.Nil(t, err) } -func TestMetaProcessor_IsShardHeaderValidFinal(t *testing.T) { - t.Parallel() - - pool := mock.NewMetaPoolsHolderFake() - noOfShards := uint32(5) - - arguments := createMockMetaArguments() - arguments.Accounts = &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, - } - arguments.DataPool = pool - arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) - arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) - arguments.Store = initStore() - mp, _ := blproc.NewMetaProcessor(arguments) - - prevRandSeed := []byte("prevrand") - currRandSeed := []byte("currrand") - 
notarizedHdrs := mp.NotarizedHdrs() - setLastNotarizedHdr(noOfShards, 9, 44, prevRandSeed, notarizedHdrs) - - //put the existing headers inside datapool - - //header shard 0 - prevHash, _ := mp.ComputeHeaderHash(mp.LastNotarizedHdrForShard(0).(*block.Header)) - prevHdr := &block.Header{ - Round: 10, - Nonce: 45, - ShardId: 0, - PrevRandSeed: prevRandSeed, - RandSeed: currRandSeed, - PrevHash: prevHash, - RootHash: []byte("prevRootHash")} - - wrongPrevHdr := &block.Header{ - Round: 10, - Nonce: 50, - ShardId: 0, - PrevRandSeed: prevRandSeed, - RandSeed: currRandSeed, - PrevHash: prevHash, - RootHash: []byte("prevRootHash")} - - prevHash, _ = mp.ComputeHeaderHash(prevHdr) - currHdr := &block.Header{ - Round: 11, - Nonce: 46, - ShardId: 0, - PrevRandSeed: currRandSeed, - RandSeed: []byte("nextrand"), - PrevHash: prevHash, - RootHash: []byte("currRootHash")} - - srtShardHdrs := make([]*block.Header, 0) - - valid, hdrIds := mp.IsShardHeaderValidFinal(currHdr, prevHdr, nil) - assert.False(t, valid) - assert.Nil(t, hdrIds) - - valid, hdrIds = mp.IsShardHeaderValidFinal(nil, prevHdr, srtShardHdrs) - assert.False(t, valid) - assert.Nil(t, hdrIds) - - valid, hdrIds = mp.IsShardHeaderValidFinal(currHdr, nil, srtShardHdrs) - assert.False(t, valid) - assert.Nil(t, hdrIds) - - valid, hdrIds = mp.IsShardHeaderValidFinal(currHdr, wrongPrevHdr, srtShardHdrs) - assert.False(t, valid) - assert.Nil(t, hdrIds) - - mp.SetShardBlockFinality(0) - valid, hdrIds = mp.IsShardHeaderValidFinal(currHdr, prevHdr, srtShardHdrs) - assert.True(t, valid) - assert.NotNil(t, hdrIds) - - mp.SetShardBlockFinality(1) - nextWrongHdr := &block.Header{ - Round: 12, - Nonce: 44, - ShardId: 0, - PrevRandSeed: currRandSeed, - RandSeed: []byte("nextrand"), - PrevHash: prevHash, - RootHash: []byte("currRootHash")} - - srtShardHdrs = append(srtShardHdrs, nextWrongHdr) - valid, hdrIds = mp.IsShardHeaderValidFinal(currHdr, prevHdr, srtShardHdrs) - assert.False(t, valid) - assert.Nil(t, hdrIds) - - prevHash, _ = mp.ComputeHeaderHash(currHdr) - nextHdr := &block.Header{ - Round: 12, - Nonce: 47, - ShardId: 0, - PrevRandSeed: []byte("nextrand"), - RandSeed: []byte("nextnextrand"), - PrevHash: prevHash, - RootHash: []byte("currRootHash")} - - srtShardHdrs = append(srtShardHdrs, nextHdr) - valid, hdrIds = mp.IsShardHeaderValidFinal(currHdr, prevHdr, srtShardHdrs) - assert.True(t, valid) - assert.NotNil(t, hdrIds) -} - func TestMetaProcessor_DecodeBlockBody(t *testing.T) { t.Parallel() @@ -2168,7 +2096,14 @@ func TestMetaProcessor_CreateMiniBlocksDestMe(t *testing.T) { t.Parallel() hash1 := []byte("hash1") + hdr1 := &block.Header{ + Nonce: 1, + Round: 1, + PrevRandSeed: []byte("roothash"), + MiniBlockHeaders: []block.MiniBlockHeader{{Hash: hash1, SenderShardID: 1}}, + } hdrHash1Bytes := []byte("hdr_hash1") + hdr2 := &block.Header{Nonce: 2, Round: 2} hdrHash2Bytes := []byte("hdr_hash2") expectedMiniBlock1 := &block.MiniBlock{TxHashes: [][]byte{hash1}} expectedMiniBlock2 := &block.MiniBlock{TxHashes: [][]byte{[]byte("hash2")}} @@ -2176,30 +2111,24 @@ func TestMetaProcessor_CreateMiniBlocksDestMe(t *testing.T) { dPool.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{} } - dPool.ShardHeadersCalled = func() storage.Cacher { - cs := &mock.CacherStub{} - cs.RegisterHandlerCalled = func(i func(key []byte)) { + dPool.HeadersCalled = func() dataRetriever.HeadersPool { + cs := &mock.HeadersCacherStub{} + cs.RegisterHandlerCalled = func(i func(header data.HeaderHandler, key []byte)) { } - cs.PeekCalled = 
func(key []byte) (value interface{}, ok bool) { - if bytes.Equal(hdrHash1Bytes, key) { - return &block.Header{ - Nonce: 1, - Round: 1, - PrevRandSeed: []byte("roothash"), - MiniBlockHeaders: []block.MiniBlockHeader{{Hash: hash1, SenderShardID: 1}}, - }, true + cs.GetHeaderByNonceAndShardIdCalled = func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + if hdrNonce == 1 { + return []data.HeaderHandler{hdr1}, [][]byte{hdrHash1Bytes}, nil } - if bytes.Equal(hdrHash2Bytes, key) { - return &block.Header{Nonce: 2, Round: 2}, true + if hdrNonce == 2 { + return []data.HeaderHandler{hdr2}, [][]byte{hdrHash2Bytes}, nil } - return nil, false + return nil, nil, errors.New("err") } cs.LenCalled = func() int { return 0 } - cs.RemoveCalled = func(key []byte) {} - cs.KeysCalled = func() [][]byte { - return [][]byte{hdrHash1Bytes, hdrHash2Bytes} + cs.NoncesCalled = func(shardId uint32) []uint64 { + return []uint64{1, 2} } cs.MaxSizeCalled = func() int { return 1000 @@ -2219,6 +2148,7 @@ func TestMetaProcessor_CreateMiniBlocksDestMe(t *testing.T) { arguments := createMockMetaArguments() arguments.DataPool = dPool arguments.TxCoordinator = txCoordinator + arguments.BlockTracker.AddTrackedHeader(hdr1, hdrHash1Bytes) mp, _ := blproc.NewMetaProcessor(arguments) round := uint64(10) @@ -2336,30 +2266,29 @@ func TestMetaProcessor_VerifyCrossShardMiniBlocksDstMe(t *testing.T) { dPool.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{} } - dPool.ShardHeadersCalled = func() storage.Cacher { - cs := &mock.CacherStub{} - cs.RegisterHandlerCalled = func(i func(key []byte)) { + dPool.HeadersCalled = func() dataRetriever.HeadersPool { + cs := &mock.HeadersCacherStub{} + cs.RegisterHandlerCalled = func(i func(header data.HeaderHandler, key []byte)) { } - cs.PeekCalled = func(key []byte) (value interface{}, ok bool) { + cs.GetHeaderByHashCalled = func(key []byte) (handler data.HeaderHandler, e error) { if bytes.Equal(hdrHash1Bytes, key) { return &block.Header{ Nonce: 1, Round: 1, PrevRandSeed: []byte("roothash"), MiniBlockHeaders: []block.MiniBlockHeader{{Hash: hash1, SenderShardID: 1}}, - }, true + }, nil } if bytes.Equal(hdrHash2Bytes, key) { - return &block.Header{Nonce: 2, Round: 2}, true + return &block.Header{Nonce: 2, Round: 2}, nil } - return nil, false + return nil, errors.New("err") } cs.LenCalled = func() int { return 0 } - cs.RemoveCalled = func(key []byte) {} - cs.KeysCalled = func() [][]byte { - return [][]byte{hdrHash1Bytes, hdrHash2Bytes} + cs.NoncesCalled = func(shardId uint32) []uint64 { + return []uint64{1, 2} } cs.MaxSizeCalled = func() int { return 1000 @@ -2429,11 +2358,11 @@ func TestMetaProcessor_CreateBlockCreateHeaderProcessBlock(t *testing.T) { dPool.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{} } - dPool.ShardHeadersCalled = func() storage.Cacher { - cs := &mock.CacherStub{} - cs.RegisterHandlerCalled = func(i func(key []byte)) { + dPool.HeadersCalled = func() dataRetriever.HeadersPool { + cs := &mock.HeadersCacherStub{} + cs.RegisterHandlerCalled = func(i func(header data.HeaderHandler, key []byte)) { } - cs.PeekCalled = func(key []byte) (value interface{}, ok bool) { + cs.GetHeaderByHashCalled = func(key []byte) (handler data.HeaderHandler, e error) { if bytes.Equal(hdrHash1Bytes, key) { return &block.Header{ PrevHash: []byte("hash1"), @@ -2441,19 +2370,18 @@ func TestMetaProcessor_CreateBlockCreateHeaderProcessBlock(t *testing.T) { Round: 1, 
PrevRandSeed: []byte("roothash"), MiniBlockHeaders: []block.MiniBlockHeader{{Hash: []byte("hash1"), SenderShardID: 1}}, - }, true + }, nil } if bytes.Equal(hrdHash2Bytes, key) { - return &block.Header{Nonce: 2, Round: 2}, true + return &block.Header{Nonce: 2, Round: 2}, nil } - return nil, false + return nil, errors.New("err") } cs.LenCalled = func() int { return 0 } - cs.RemoveCalled = func(key []byte) {} - cs.KeysCalled = func() [][]byte { - return [][]byte{hdrHash1Bytes, hrdHash2Bytes} + cs.NoncesCalled = func(shardId uint32) []uint64 { + return []uint64{1, 2} } cs.MaxSizeCalled = func() int { return 1000 @@ -2492,3 +2420,202 @@ func TestMetaProcessor_CreateBlockCreateHeaderProcessBlock(t *testing.T) { err = mp.ProcessBlock(blkc, headerHandler, bodyHandler, func() time.Duration { return time.Second }) assert.Nil(t, err) } + +func TestMetaProcessor_CreateEpochStartFromMetaBlockEpochIsNotStarted(t *testing.T) { + t.Parallel() + + arguments := createMockMetaArguments() + arguments.EpochStartTrigger = &mock.EpochStartTriggerStub{ + IsEpochStartCalled: func() bool { + return false + }, + } + + mp, _ := blproc.NewMetaProcessor(arguments) + + epStart, err := mp.CreateEpochStartForMetablock() + assert.Nil(t, err) + + emptyEpochStart := block.EpochStart{} + assert.Equal(t, emptyEpochStart, *epStart) +} + +func TestMetaProcessor_CreateEpochStartFromMetaBlockHashComputeIssueShouldErr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("err computing hash") + + arguments := createMockMetaArguments() + arguments.Marshalizer = &mock.MarshalizerStub{ + // trigger an error on the Marshal method called from core's ComputeHash + MarshalCalled: func(obj interface{}) (i []byte, e error) { + return nil, expectedErr + }, + } + arguments.EpochStartTrigger = &mock.EpochStartTriggerStub{ + IsEpochStartCalled: func() bool { + return true + }, + } + + mp, err := blproc.NewMetaProcessor(arguments) + assert.Nil(t, err) + + epStart, err := mp.CreateEpochStartForMetablock() + assert.Nil(t, epStart) + assert.Equal(t, expectedErr, err) +} + +func TestMetaProcessor_CreateEpochStartFromMetaBlockShouldWork(t *testing.T) { + t.Parallel() + + arguments := createMockMetaArguments() + arguments.EpochStartTrigger = &mock.EpochStartTriggerStub{ + IsEpochStartCalled: func() bool { + return true + }, + } + + hash1 := []byte("hash1") + hash2 := []byte("hash2") + + startHeaders := createGenesisBlocks(arguments.ShardCoordinator) + arguments.BlockTracker = mock.NewBlockTrackerMock(arguments.ShardCoordinator, startHeaders) + + hdr := startHeaders[0].(*block.Header) + hdr.MetaBlockHashes = [][]byte{hash1, hash2} + hdr.Nonce = 1 + startHeaders[0] = hdr + + dPool := initMetaDataPool() + dPool.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + dPool.HeadersCalled = func() dataRetriever.HeadersPool { + cs := &mock.HeadersCacherStub{} + cs.RegisterHandlerCalled = func(i func(header data.HeaderHandler, key []byte)) { + } + cs.GetHeaderByHashCalled = func(hash []byte) (handler data.HeaderHandler, e error) { + return &block.Header{ + PrevHash: []byte("hash1"), + Nonce: 1, + Round: 1, + PrevRandSeed: []byte("roothash"), + MiniBlockHeaders: []block.MiniBlockHeader{{Hash: []byte("hash1"), SenderShardID: 1}}, + MetaBlockHashes: [][]byte{[]byte("hash1"), []byte("hash2")}, + }, nil + } + + cs.LenCalled = func() int { + return 0 + } + cs.RemoveHeaderByHashCalled = func(key []byte) {} + cs.NoncesCalled = func(shardId uint32) []uint64 { + return []uint64{1, 2} + } + cs.MaxSizeCalled = 
func() int { + return 1000 + } + return cs + } + arguments.DataPool = dPool + arguments.PendingMiniBlocks = &mock.PendingMiniBlocksHandlerStub{ + PendingMiniBlockHeadersCalled: func(lastNotarizedHeaders []data.HeaderHandler) ([]block.ShardMiniBlockHeader, error) { + var hdrs []block.ShardMiniBlockHeader + hdrs = append(hdrs, block.ShardMiniBlockHeader{ + Hash: hash1, + ReceiverShardID: 0, + SenderShardID: 1, + TxCount: 2, + }) + + return hdrs, nil + }, + } + mp, _ := blproc.NewMetaProcessor(arguments) + + epStart, err := mp.CreateEpochStartForMetablock() + assert.Nil(t, err) + assert.NotNil(t, epStart) + assert.Equal(t, hash1, epStart.LastFinalizedHeaders[0].LastFinishedMetaBlock) + assert.Equal(t, hash2, epStart.LastFinalizedHeaders[0].FirstPendingMetaBlock) +} + +func TestShardProcessor_getLastFinalizedMetaHashForShardMetaHashNotFoundShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockMetaArguments() + arguments.EpochStartTrigger = &mock.EpochStartTriggerStub{ + IsEpochStartCalled: func() bool { + return false + }, + } + + mp, _ := blproc.NewMetaProcessor(arguments) + round := uint64(10) + + shardHdr := &block.Header{Round: round} + last, lastFinal, err := mp.GetLastFinalizedMetaHashForShard(shardHdr) + assert.Nil(t, last) + assert.Nil(t, lastFinal) + assert.Equal(t, nil, err) +} + +func TestShardProcessor_getLastFinalizedMetaHashForShardShouldWork(t *testing.T) { + t.Parallel() + + arguments := createMockMetaArguments() + arguments.EpochStartTrigger = &mock.EpochStartTriggerStub{ + IsEpochStartCalled: func() bool { + return false + }, + } + + dPool := initMetaDataPool() + dPool.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } + dPool.HeadersCalled = func() dataRetriever.HeadersPool { + cs := &mock.HeadersCacherStub{} + cs.RegisterHandlerCalled = func(i func(header data.HeaderHandler, key []byte)) { + } + cs.GetHeaderByHashCalled = func(hash []byte) (handler data.HeaderHandler, e error) { + return &block.Header{ + PrevHash: []byte("hash1"), + Nonce: 2, + Round: 2, + PrevRandSeed: []byte("roothash"), + MiniBlockHeaders: []block.MiniBlockHeader{{Hash: []byte("hash1"), SenderShardID: 1}}, + MetaBlockHashes: [][]byte{[]byte("hash1"), []byte("hash2")}, + }, nil + } + + cs.LenCalled = func() int { + return 0 + } + cs.RemoveHeaderByHashCalled = func(key []byte) {} + cs.NoncesCalled = func(shardId uint32) []uint64 { + return []uint64{1, 2} + } + cs.MaxSizeCalled = func() int { + return 1000 + } + return cs + } + + arguments.DataPool = dPool + + mp, _ := blproc.NewMetaProcessor(arguments) + round := uint64(10) + nonce := uint64(1) + + shardHdr := &block.Header{ + Round: round, + Nonce: nonce, + MetaBlockHashes: [][]byte{[]byte("mb_hash1")}, + } + last, lastFinal, err := mp.GetLastFinalizedMetaHashForShard(shardHdr) + assert.NotNil(t, last) + assert.NotNil(t, lastFinal) + assert.Nil(t, err) +} diff --git a/process/block/metrics.go b/process/block/metrics.go index 7e7326fef97..92dd192b07a 100644 --- a/process/block/metrics.go +++ b/process/block/metrics.go @@ -82,14 +82,15 @@ func saveMetricsForACommittedBlock( isInConsensus bool, currentBlockHash string, highestFinalBlockNonce uint64, - headerMetaNonce uint64, + headerMeta data.HeaderHandler, ) { if isInConsensus { appStatusHandler.Increment(core.MetricCountConsensusAcceptedBlocks) } + appStatusHandler.SetUInt64Value(core.MetricEpochNumber, uint64(headerMeta.GetEpoch())) appStatusHandler.SetStringValue(core.MetricCurrentBlockHash, currentBlockHash) 
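	// headerMeta exposes both the epoch (used for the MetricEpochNumber value set above) and the nonce used for the cross-check height metric set below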
appStatusHandler.SetUInt64Value(core.MetricHighestFinalBlockInShard, highestFinalBlockNonce) - appStatusHandler.SetStringValue(core.MetricCrossCheckBlockHeight, fmt.Sprintf("meta %d", headerMetaNonce)) + appStatusHandler.SetStringValue(core.MetricCrossCheckBlockHeight, fmt.Sprintf("meta %d", headerMeta.GetNonce())) } func saveMetachainCommitBlockMetrics( @@ -100,7 +101,7 @@ func saveMetachainCommitBlockMetrics( ) { appStatusHandler.SetStringValue(core.MetricCurrentBlockHash, display.DisplayByteSlice(headerHash)) - + appStatusHandler.SetUInt64Value(core.MetricEpochNumber, uint64(header.Epoch)) pubKeys, err := nodesCoordinator.GetValidatorsPublicKeys(header.PrevRandSeed, header.Round, sharding.MetachainShardId) if err != nil { log.Debug("cannot get validators public keys", "error", err.Error()) diff --git a/process/block/poolsCleaner/txPoolsCleaner.go b/process/block/poolsCleaner/txPoolsCleaner.go index 15b7a77dcf5..48038a91765 100644 --- a/process/block/poolsCleaner/txPoolsCleaner.go +++ b/process/block/poolsCleaner/txPoolsCleaner.go @@ -80,6 +80,11 @@ func (tpc *TxPoolsCleaner) Clean(duration time.Duration) (bool, error) { } } +// TODO, tx cache cleanup optimization: +// Getting all the keys of the cache (see below) can be pretty time consuming especially when the txs pool is full. +// We can redesign the cleanup for the new cache type so that we improve the processing time. +// One idea is that when cleaning executed tx hashes for a block, we can remove all the txs with lower nonce from the accounts-txs cache, for the respective account as well. +// https://github.com/ElrondNetwork/elrond-go/pull/863#discussion_r363641694 func (tpc *TxPoolsCleaner) cleanPools(haveTime func() bool) { atomic.StoreUint64(&tpc.numRemovedTxs, 0) diff --git a/process/block/poolsCleaner/txPoolsCleaner_test.go b/process/block/poolsCleaner/txPoolsCleaner_test.go index 863f9d0ce1b..ef2a31717a5 100644 --- a/process/block/poolsCleaner/txPoolsCleaner_test.go +++ b/process/block/poolsCleaner/txPoolsCleaner_test.go @@ -3,6 +3,7 @@ package poolsCleaner_test import ( "bytes" "math/big" + "sync/atomic" "testing" "time" @@ -27,6 +28,25 @@ func getAccAdapter(nonce uint64, balance *big.Int) *mock.AccountsStub { return accDB } +func initDataPoolWithDelayedKeys(delay time.Duration) *mock.PoolsHolderStub { + return &mock.PoolsHolderStub{ + TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + KeysCalled: func() [][]byte { + time.Sleep(delay) + + return make([][]byte, 0) + }, + } + }, + } + }, + } +} + func initDataPoolWithFourTransactions() *mock.PoolsHolderStub { delayedFetchingKey := "key1" validTxKey := "key2" @@ -271,18 +291,21 @@ func TestTxPoolsCleaner_CleanWillDoNothingIfIsCalledMultipleTime(t *testing.T) { balance := big.NewInt(1) accounts := getAccAdapter(nonce, balance) shardCoordinator := mock.NewOneShardCoordinatorMock() - tdp := initDataPoolWithFourTransactions() + tdp := initDataPoolWithDelayedKeys(time.Second) addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") txsPoolsCleaner, _ := poolsCleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) - go func() { - _, _ = txsPoolsCleaner.Clean(time.Second) - }() - time.Sleep(time.Millisecond) - go func() { - itRan, _ := txsPoolsCleaner.Clean(time.Second) - assert.Equal(t, false, itRan) - }() + numRun := uint32(0) + for i := 0; i < 10; i++ { + 
go func() { + itRan, _ := txsPoolsCleaner.Clean(time.Second) + if itRan { + atomic.AddUint32(&numRun, 1) + } + }() + } + + time.Sleep(time.Second * 2) - time.Sleep(2 * time.Second) + assert.Equal(t, uint32(1), atomic.LoadUint32(&numRun)) } diff --git a/process/block/postprocess/basePostProcess.go b/process/block/postprocess/basePostProcess.go new file mode 100644 index 00000000000..e24d380f883 --- /dev/null +++ b/process/block/postprocess/basePostProcess.go @@ -0,0 +1,147 @@ +package postprocess + +import ( + "bytes" + "sync" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/logger" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type txShardInfo struct { + senderShardID uint32 + receiverShardID uint32 +} + +type txInfo struct { + tx data.TransactionHandler + *txShardInfo +} + +var log = logger.GetOrCreate("process/block/postprocess") + +type basePostProcessor struct { + hasher hashing.Hasher + marshalizer marshal.Marshalizer + store dataRetriever.StorageService + shardCoordinator sharding.Coordinator + storageType dataRetriever.UnitType + + mutInterResultsForBlock sync.Mutex + interResultsForBlock map[string]*txInfo + intraShardMiniBlock *block.MiniBlock +} + +// SaveCurrentIntermediateTxToStorage saves all current intermediate results to the provided storage unit +func (bpp *basePostProcessor) SaveCurrentIntermediateTxToStorage() error { + bpp.mutInterResultsForBlock.Lock() + defer bpp.mutInterResultsForBlock.Unlock() + + for _, txInfoValue := range bpp.interResultsForBlock { + if txInfoValue.tx == nil { + return process.ErrMissingTransaction + } + + buff, err := bpp.marshalizer.Marshal(txInfoValue.tx) + if err != nil { + return err + } + + errNotCritical := bpp.store.Put(bpp.storageType, bpp.hasher.Compute(string(buff)), buff) + if errNotCritical != nil { + log.Debug("SaveCurrentIntermediateTxToStorage put", "type", bpp.storageType, "error", errNotCritical.Error()) + } + } + + return nil +} + +// CreateBlockStarted cleans the local cache map for processed/created intermediate transactions at this round +func (bpp *basePostProcessor) CreateBlockStarted() { + bpp.mutInterResultsForBlock.Lock() + bpp.interResultsForBlock = make(map[string]*txInfo) + bpp.intraShardMiniBlock = nil + bpp.mutInterResultsForBlock.Unlock() +} + +// CreateMarshalizedData creates the marshalized data for broadcasting purposes +func (bpp *basePostProcessor) CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) { + bpp.mutInterResultsForBlock.Lock() + defer bpp.mutInterResultsForBlock.Unlock() + + mrsTxs := make([][]byte, 0, len(txHashes)) + for _, txHash := range txHashes { + txInfo := bpp.interResultsForBlock[string(txHash)] + if txInfo == nil || txInfo.tx == nil { + continue + } + + txMrs, err := bpp.marshalizer.Marshal(txInfo.tx) + if err != nil { + return nil, process.ErrMarshalWithoutSuccess + } + mrsTxs = append(mrsTxs, txMrs) + } + + return mrsTxs, nil +} + +// GetAllCurrentFinishedTxs returns the cached finalized transactions for current round +func (bpp *basePostProcessor) GetAllCurrentFinishedTxs() map[string]data.TransactionHandler { + bpp.mutInterResultsForBlock.Lock() + + scrPool := make(map[string]data.TransactionHandler) + for txHash, txInfo := range 
bpp.interResultsForBlock { + if txInfo.receiverShardID != bpp.shardCoordinator.SelfId() { + continue + } + if txInfo.senderShardID != bpp.shardCoordinator.SelfId() { + continue + } + scrPool[txHash] = txInfo.tx + } + bpp.mutInterResultsForBlock.Unlock() + + return scrPool +} + +func (bpp *basePostProcessor) verifyMiniBlock(createMBs map[uint32]*block.MiniBlock, mb *block.MiniBlock) error { + createdScrMb, ok := createMBs[mb.ReceiverShardID] + if createdScrMb == nil || !ok { + return process.ErrNilMiniBlocks + } + + createdHash, err := core.CalculateHash(bpp.marshalizer, bpp.hasher, createdScrMb) + if err != nil { + return err + } + + receivedHash, err := core.CalculateHash(bpp.marshalizer, bpp.hasher, mb) + if err != nil { + return err + } + + if !bytes.Equal(createdHash, receivedHash) { + return process.ErrMiniBlockHashMismatch + } + + return nil +} + +func (bpp *basePostProcessor) GetCreatedInShardMiniBlock() *block.MiniBlock { + bpp.mutInterResultsForBlock.Lock() + defer bpp.mutInterResultsForBlock.Unlock() + + if bpp.intraShardMiniBlock == nil { + return nil + } + + return bpp.intraShardMiniBlock.Clone() +} diff --git a/process/block/preprocess/intermediateResults.go b/process/block/postprocess/intermediateResults.go similarity index 59% rename from process/block/preprocess/intermediateResults.go rename to process/block/postprocess/intermediateResults.go index bf51ad66547..0bc607742e5 100644 --- a/process/block/preprocess/intermediateResults.go +++ b/process/block/postprocess/intermediateResults.go @@ -1,11 +1,11 @@ -package preprocess +package postprocess import ( "bytes" "sort" - "sync" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" @@ -18,16 +18,11 @@ import ( ) type intermediateResultsProcessor struct { - hasher hashing.Hasher - marshalizer marshal.Marshalizer - shardCoordinator sharding.Coordinator - adrConv state.AddressConverter - store dataRetriever.StorageService - blockType block.Type - currTxs dataRetriever.TransactionCacher - - mutInterResultsForBlock sync.Mutex - interResultsForBlock map[string]*txInfo + adrConv state.AddressConverter + blockType block.Type + currTxs dataRetriever.TransactionCacher + + *basePostProcessor } // NewIntermediateResultsProcessor creates a new intermediate results processor @@ -40,33 +35,38 @@ func NewIntermediateResultsProcessor( blockType block.Type, currTxs dataRetriever.TransactionCacher, ) (*intermediateResultsProcessor, error) { - if hasher == nil || hasher.IsInterfaceNil() { + if check.IfNil(hasher) { return nil, process.ErrNilHasher } - if marshalizer == nil || marshalizer.IsInterfaceNil() { + if check.IfNil(marshalizer) { return nil, process.ErrNilMarshalizer } - if coordinator == nil || coordinator.IsInterfaceNil() { + if check.IfNil(coordinator) { return nil, process.ErrNilShardCoordinator } - if adrConv == nil || adrConv.IsInterfaceNil() { + if check.IfNil(adrConv) { return nil, process.ErrNilAddressConverter } - if store == nil || store.IsInterfaceNil() { + if check.IfNil(store) { return nil, process.ErrNilStorage } - if currTxs == nil || currTxs.IsInterfaceNil() { + if check.IfNil(currTxs) { return nil, process.ErrNilTxForCurrentBlockHandler } - irp := &intermediateResultsProcessor{ + base := &basePostProcessor{ hasher: hasher, marshalizer: marshalizer, shardCoordinator: coordinator, - adrConv: adrConv, - blockType: blockType, store: 
store, - currTxs: currTxs, + storageType: dataRetriever.UnsignedTransactionUnit, + } + + irp := &intermediateResultsProcessor{ + basePostProcessor: base, + adrConv: adrConv, + blockType: blockType, + currTxs: currTxs, } irp.interResultsForBlock = make(map[string]*txInfo, 0) @@ -106,6 +106,10 @@ func (irp *intermediateResultsProcessor) CreateAllInterMiniBlocks() map[uint32]* } } + if _, ok := finalMBs[irp.shardCoordinator.SelfId()]; ok { + irp.intraShardMiniBlock = finalMBs[irp.shardCoordinator.SelfId()].Clone() + } + irp.mutInterResultsForBlock.Unlock() return finalMBs @@ -115,6 +119,7 @@ func (irp *intermediateResultsProcessor) CreateAllInterMiniBlocks() map[uint32]* func (irp *intermediateResultsProcessor) VerifyInterMiniBlocks(body block.Body) error { scrMbs := irp.CreateAllInterMiniBlocks() + countedCrossShard := 0 for i := 0; i < len(body); i++ { mb := body[i] if mb.Type != irp.blockType { @@ -124,24 +129,27 @@ func (irp *intermediateResultsProcessor) VerifyInterMiniBlocks(body block.Body) continue } - createdScrMb, ok := scrMbs[mb.ReceiverShardID] - if createdScrMb == nil || !ok { - return process.ErrNilMiniBlocks - } - - createdHash, err := core.CalculateHash(irp.marshalizer, irp.hasher, createdScrMb) + countedCrossShard++ + err := irp.verifyMiniBlock(scrMbs, mb) if err != nil { return err } + } - receivedHash, err := core.CalculateHash(irp.marshalizer, irp.hasher, mb) - if err != nil { - return err + createCrossShard := 0 + for _, mb := range scrMbs { + if mb.Type != irp.blockType { + continue } - - if !bytes.Equal(createdHash, receivedHash) { - return process.ErrMiniBlockHashMismatch + if mb.ReceiverShardID == irp.shardCoordinator.SelfId() { + continue } + + createCrossShard++ + } + + if createCrossShard != countedCrossShard { + return process.ErrMiniBlockNumMissMatch } return nil @@ -176,38 +184,6 @@ func (irp *intermediateResultsProcessor) AddIntermediateTransactions(txs []data. 
return nil } -// SaveCurrentIntermediateTxToStorage saves all current intermediate results to the provided storage unit -func (irp *intermediateResultsProcessor) SaveCurrentIntermediateTxToStorage() error { - irp.mutInterResultsForBlock.Lock() - defer irp.mutInterResultsForBlock.Unlock() - - for _, txInfoValue := range irp.interResultsForBlock { - if txInfoValue.tx == nil { - log.Debug("missing transaction in SaveCurrentIntermediateTxToStorage ", "type", dataRetriever.UnsignedTransactionUnit) - return process.ErrMissingTransaction - } - - buff, err := irp.marshalizer.Marshal(txInfoValue.tx) - if err != nil { - return err - } - - errNotCritical := irp.store.Put(dataRetriever.UnsignedTransactionUnit, irp.hasher.Compute(string(buff)), buff) - if errNotCritical != nil { - log.Debug("UnsignedTransactionUnit.Put", "error", errNotCritical.Error()) - } - } - - return nil -} - -// CreateBlockStarted cleans the local cache map for processed/created intermediate transactions at this round -func (irp *intermediateResultsProcessor) CreateBlockStarted() { - irp.mutInterResultsForBlock.Lock() - defer irp.mutInterResultsForBlock.Unlock() - irp.interResultsForBlock = make(map[string]*txInfo, 0) -} - func (irp *intermediateResultsProcessor) getShardIdsFromAddresses(sndAddr []byte, rcvAddr []byte) (uint32, uint32, error) { adrSrc, err := irp.adrConv.CreateAddressFromPublicKeyBytes(sndAddr) if err != nil { @@ -234,52 +210,7 @@ func (irp *intermediateResultsProcessor) getShardIdsFromAddresses(sndAddr []byte return shardForSrc, shardForDst, nil } -// CreateMarshalizedData creates the marshalized data for broadcasting purposes -func (irp *intermediateResultsProcessor) CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) { - irp.mutInterResultsForBlock.Lock() - defer irp.mutInterResultsForBlock.Unlock() - - mrsTxs := make([][]byte, 0, len(txHashes)) - for _, txHash := range txHashes { - txInfo := irp.interResultsForBlock[string(txHash)] - - if txInfo == nil || txInfo.tx == nil { - continue - } - - txMrs, err := irp.marshalizer.Marshal(txInfo.tx) - if err != nil { - return nil, process.ErrMarshalWithoutSuccess - } - mrsTxs = append(mrsTxs, txMrs) - } - - return mrsTxs, nil -} - -// GetAllCurrentFinishedTxs returns the cached finalized transactions for current round -func (irp *intermediateResultsProcessor) GetAllCurrentFinishedTxs() map[string]data.TransactionHandler { - irp.mutInterResultsForBlock.Lock() - - scrPool := make(map[string]data.TransactionHandler) - for txHash, txInfo := range irp.interResultsForBlock { - if txInfo.receiverShardID != irp.shardCoordinator.SelfId() { - continue - } - if txInfo.senderShardID != irp.shardCoordinator.SelfId() { - continue - } - scrPool[txHash] = txInfo.tx - } - irp.mutInterResultsForBlock.Unlock() - - return scrPool -} - // IsInterfaceNil returns true if there is no value under the interface func (irp *intermediateResultsProcessor) IsInterfaceNil() bool { - if irp == nil { - return true - } - return false + return irp == nil } diff --git a/process/block/preprocess/intermediateResults_test.go b/process/block/postprocess/intermediateResults_test.go similarity index 99% rename from process/block/preprocess/intermediateResults_test.go rename to process/block/postprocess/intermediateResults_test.go index 7a1681e759f..08fe5395a6d 100644 --- a/process/block/preprocess/intermediateResults_test.go +++ b/process/block/postprocess/intermediateResults_test.go @@ -1,4 +1,4 @@ -package preprocess +package postprocess import ( "bytes" @@ -415,7 +415,10 @@ func 
TestIntermediateResultsProcessor_VerifyInterMiniBlocksBodyShouldpassAsNotCr assert.Nil(t, err) body := block.Body{} - body = append(body, &block.MiniBlock{Type: block.SmartContractResultBlock, ReceiverShardID: shardCoordinator.SelfId()}) + body = append(body, &block.MiniBlock{ + Type: block.SmartContractResultBlock, + ReceiverShardID: shardCoordinator.SelfId(), + SenderShardID: shardCoordinator.SelfId() + 1}) err = irp.VerifyInterMiniBlocks(body) assert.Nil(t, err) diff --git a/process/block/postprocess/oneMBPostProcessor.go b/process/block/postprocess/oneMBPostProcessor.go new file mode 100644 index 00000000000..b431adcbac0 --- /dev/null +++ b/process/block/postprocess/oneMBPostProcessor.go @@ -0,0 +1,144 @@ +package postprocess + +import ( + "bytes" + "sort" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type oneMBPostProcessor struct { + blockType block.Type + *basePostProcessor +} + +// NewOneMiniBlockPostProcessor creates a new intermediate results processor +func NewOneMiniBlockPostProcessor( + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, + coordinator sharding.Coordinator, + store dataRetriever.StorageService, + blockType block.Type, + storageType dataRetriever.UnitType, +) (*oneMBPostProcessor, error) { + if check.IfNil(hasher) { + return nil, process.ErrNilHasher + } + if check.IfNil(marshalizer) { + return nil, process.ErrNilMarshalizer + } + if check.IfNil(coordinator) { + return nil, process.ErrNilShardCoordinator + } + if check.IfNil(store) { + return nil, process.ErrNilStorage + } + + base := &basePostProcessor{ + hasher: hasher, + marshalizer: marshalizer, + shardCoordinator: coordinator, + store: store, + storageType: storageType, + } + + opp := &oneMBPostProcessor{ + basePostProcessor: base, + blockType: blockType, + } + + opp.interResultsForBlock = make(map[string]*txInfo) + + return opp, nil +} + +// CreateAllInterMiniBlocks returns the miniblock for the current round created from the receipts/bad transactions +func (opp *oneMBPostProcessor) CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock { + selfId := opp.shardCoordinator.SelfId() + + miniBlocks := make(map[uint32]*block.MiniBlock) + opp.mutInterResultsForBlock.Lock() + defer opp.mutInterResultsForBlock.Unlock() + + if len(opp.interResultsForBlock) == 0 { + return miniBlocks + } + + miniBlocks[selfId] = &block.MiniBlock{ + Type: opp.blockType, + ReceiverShardID: selfId, + SenderShardID: selfId, + } + + for key := range opp.interResultsForBlock { + miniBlocks[selfId].TxHashes = append(miniBlocks[selfId].TxHashes, []byte(key)) + } + + sort.Slice(miniBlocks[selfId].TxHashes, func(a, b int) bool { + return bytes.Compare(miniBlocks[selfId].TxHashes[a], miniBlocks[selfId].TxHashes[b]) < 0 + }) + + opp.intraShardMiniBlock = miniBlocks[selfId].Clone() + + return miniBlocks +} + +// VerifyInterMiniBlocks verifies if the receipts/bad transactions added to the block are valid +func (opp *oneMBPostProcessor) VerifyInterMiniBlocks(body block.Body) error { + scrMbs := opp.CreateAllInterMiniBlocks() + + verifiedOne := false + for i := 0; i < len(body); i++ { + mb := body[i] + if mb.Type != opp.blockType { + continue + } + + if 
verifiedOne { + return process.ErrTooManyReceiptsMiniBlocks + } + + err := opp.verifyMiniBlock(scrMbs, mb) + if err != nil { + return err + } + + verifiedOne = true + } + + return nil +} + +// AddIntermediateTransactions adds receipts/bad transactions resulting from transaction processor +func (opp *oneMBPostProcessor) AddIntermediateTransactions(txs []data.TransactionHandler) error { + opp.mutInterResultsForBlock.Lock() + defer opp.mutInterResultsForBlock.Unlock() + + selfId := opp.shardCoordinator.SelfId() + + for i := 0; i < len(txs); i++ { + txHash, err := core.CalculateHash(opp.marshalizer, opp.hasher, txs[i]) + if err != nil { + return err + } + + addReceiptShardInfo := &txShardInfo{receiverShardID: selfId, senderShardID: selfId} + scrInfo := &txInfo{tx: txs[i], txShardInfo: addReceiptShardInfo} + opp.interResultsForBlock[string(txHash)] = scrInfo + } + + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (opp *oneMBPostProcessor) IsInterfaceNil() bool { + return opp == nil +} diff --git a/process/block/postprocess/oneMBPostProcessor_test.go b/process/block/postprocess/oneMBPostProcessor_test.go new file mode 100644 index 00000000000..a1f96388df1 --- /dev/null +++ b/process/block/postprocess/oneMBPostProcessor_test.go @@ -0,0 +1,261 @@ +package postprocess + +import ( + "bytes" + "sort" + "testing" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" +) + +func TestNewOneMBPostProcessor_NilHasher(t *testing.T) { + t.Parallel() + + irp, err := NewOneMiniBlockPostProcessor( + nil, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(5), + &mock.ChainStorerMock{}, + block.TxBlock, + dataRetriever.TransactionUnit, + ) + + assert.Nil(t, irp) + assert.Equal(t, process.ErrNilHasher, err) +} + +func TestNewOneMBPostProcessor_NilMarshalizer(t *testing.T) { + t.Parallel() + + irp, err := NewOneMiniBlockPostProcessor( + &mock.HasherMock{}, + nil, + mock.NewMultiShardsCoordinatorMock(5), + &mock.ChainStorerMock{}, + block.TxBlock, + dataRetriever.TransactionUnit, + ) + + assert.Nil(t, irp) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewOneMBPostProcessor_NilShardCoord(t *testing.T) { + t.Parallel() + + irp, err := NewOneMiniBlockPostProcessor( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + nil, + &mock.ChainStorerMock{}, + block.TxBlock, + dataRetriever.TransactionUnit, + ) + + assert.Nil(t, irp) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewOneMBPostProcessor_NilStorer(t *testing.T) { + t.Parallel() + + irp, err := NewOneMiniBlockPostProcessor( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(5), + nil, + block.TxBlock, + dataRetriever.TransactionUnit, + ) + + assert.Nil(t, irp) + assert.Equal(t, process.ErrNilStorage, err) +} + +func TestNewOneMBPostProcessor_OK(t *testing.T) { + t.Parallel() + + irp, err := NewOneMiniBlockPostProcessor( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(5), + &mock.ChainStorerMock{}, + block.TxBlock, + dataRetriever.TransactionUnit, + ) + + assert.Nil(t, err) + assert.NotNil(t, irp) +} + +func TestOneMBPostProcessor_CreateAllInterMiniBlocks(t *testing.T) { 
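+	// no intermediate transactions have been added, so an empty miniblock map is expected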
+ t.Parallel() + + irp, _ := NewOneMiniBlockPostProcessor( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(5), + &mock.ChainStorerMock{}, + block.TxBlock, + dataRetriever.TransactionUnit, + ) + + mbs := irp.CreateAllInterMiniBlocks() + assert.Equal(t, 0, len(mbs)) +} + +func TestOneMBPostProcessor_CreateAllInterMiniBlocksOneMinBlock(t *testing.T) { + t.Parallel() + + irp, _ := NewOneMiniBlockPostProcessor( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(5), + &mock.ChainStorerMock{}, + block.TxBlock, + dataRetriever.TransactionUnit, + ) + + txs := make([]data.TransactionHandler, 0) + txs = append(txs, &transaction.Transaction{}) + txs = append(txs, &transaction.Transaction{}) + + err := irp.AddIntermediateTransactions(txs) + assert.Nil(t, err) + + mbs := irp.CreateAllInterMiniBlocks() + assert.Equal(t, 1, len(mbs)) +} + +func TestOneMBPostProcessor_VerifyNilBody(t *testing.T) { + t.Parallel() + + irp, _ := NewOneMiniBlockPostProcessor( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(5), + &mock.ChainStorerMock{}, + block.TxBlock, + dataRetriever.TransactionUnit, + ) + + err := irp.VerifyInterMiniBlocks(nil) + assert.Nil(t, err) +} + +func TestOneMBPostProcessor_VerifyTooManyBlock(t *testing.T) { + t.Parallel() + + irp, _ := NewOneMiniBlockPostProcessor( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(5), + &mock.ChainStorerMock{}, + block.TxBlock, + dataRetriever.TransactionUnit, + ) + + txs := make([]data.TransactionHandler, 0) + txs = append(txs, &transaction.Transaction{SndAddr: []byte("snd"), RcvAddr: []byte("recvaddr1")}) + txs = append(txs, &transaction.Transaction{SndAddr: []byte("snd"), RcvAddr: []byte("recvaddr2")}) + txs = append(txs, &transaction.Transaction{SndAddr: []byte("snd"), RcvAddr: []byte("recvaddr3")}) + txs = append(txs, &transaction.Transaction{SndAddr: []byte("snd"), RcvAddr: []byte("recvaddr4")}) + txs = append(txs, &transaction.Transaction{SndAddr: []byte("snd"), RcvAddr: []byte("recvaddr5")}) + + err := irp.AddIntermediateTransactions(txs) + assert.Nil(t, err) + + miniBlock := &block.MiniBlock{ + SenderShardID: 0, + ReceiverShardID: 0, + Type: block.TxBlock} + + for i := 0; i < len(txs); i++ { + txHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, txs[i]) + miniBlock.TxHashes = append(miniBlock.TxHashes, txHash) + } + + sort.Slice(miniBlock.TxHashes, func(a, b int) bool { + return bytes.Compare(miniBlock.TxHashes[a], miniBlock.TxHashes[b]) < 0 + }) + + body := block.Body{} + body = append(body, miniBlock) + body = append(body, miniBlock) + + err = irp.VerifyInterMiniBlocks(body) + assert.Equal(t, process.ErrTooManyReceiptsMiniBlocks, err) +} + +func TestOneMBPostProcessor_VerifyNilMiniBlocks(t *testing.T) { + t.Parallel() + + irp, _ := NewOneMiniBlockPostProcessor( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(5), + &mock.ChainStorerMock{}, + block.TxBlock, + dataRetriever.TransactionUnit, + ) + + miniBlock := &block.MiniBlock{ + SenderShardID: 0, + ReceiverShardID: 0, + Type: block.TxBlock} + body := block.Body{} + body = append(body, miniBlock) + + err := irp.VerifyInterMiniBlocks(body) + assert.Equal(t, process.ErrNilMiniBlocks, err) +} + +func TestOneMBPostProcessor_VerifyOk(t *testing.T) { + t.Parallel() + + irp, _ := NewOneMiniBlockPostProcessor( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(5), + 
&mock.ChainStorerMock{}, + block.TxBlock, + dataRetriever.TransactionUnit, + ) + + txs := make([]data.TransactionHandler, 0) + txs = append(txs, &transaction.Transaction{SndAddr: []byte("snd"), RcvAddr: []byte("recvaddr1")}) + txs = append(txs, &transaction.Transaction{SndAddr: []byte("snd"), RcvAddr: []byte("recvaddr2")}) + txs = append(txs, &transaction.Transaction{SndAddr: []byte("snd"), RcvAddr: []byte("recvaddr3")}) + txs = append(txs, &transaction.Transaction{SndAddr: []byte("snd"), RcvAddr: []byte("recvaddr4")}) + txs = append(txs, &transaction.Transaction{SndAddr: []byte("snd"), RcvAddr: []byte("recvaddr5")}) + + err := irp.AddIntermediateTransactions(txs) + assert.Nil(t, err) + + miniBlock := &block.MiniBlock{ + SenderShardID: 0, + ReceiverShardID: 0, + Type: block.TxBlock} + + for i := 0; i < len(txs); i++ { + txHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, txs[i]) + miniBlock.TxHashes = append(miniBlock.TxHashes, txHash) + } + + sort.Slice(miniBlock.TxHashes, func(a, b int) bool { + return bytes.Compare(miniBlock.TxHashes[a], miniBlock.TxHashes[b]) < 0 + }) + + body := block.Body{} + body = append(body, miniBlock) + + err = irp.VerifyInterMiniBlocks(body) + assert.Nil(t, err) +} diff --git a/process/block/preprocess/adapterFactory.go b/process/block/preprocess/adapterFactory.go new file mode 100644 index 00000000000..33badf77361 --- /dev/null +++ b/process/block/preprocess/adapterFactory.go @@ -0,0 +1,16 @@ +package preprocess + +import ( + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/txcache" +) + +// createSortedTransactionsProvider creates a sorted transactions provider for a given cache +func createSortedTransactionsProvider(transactionsPreprocessor *transactions, cache storage.Cacher, cacheKey string) SortedTransactionsProvider { + txCache, isTxCache := cache.(*txcache.TxCache) + if isTxCache { + return newAdapterTxCacheToSortedTransactionsProvider(txCache) + } + + return newAdapterGenericCacheToSortedTransactionsProvider(transactionsPreprocessor, cache, cacheKey) +} diff --git a/process/block/preprocess/adapterGenericCacheToSortedTransactionsProvider.go b/process/block/preprocess/adapterGenericCacheToSortedTransactionsProvider.go new file mode 100644 index 00000000000..ffe0b9dfca2 --- /dev/null +++ b/process/block/preprocess/adapterGenericCacheToSortedTransactionsProvider.go @@ -0,0 +1,116 @@ +package preprocess + +import ( + "sort" + + "github.com/ElrondNetwork/elrond-go/core/sliceUtil" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/storage" +) + +type adapterGenericCacheToSortedTransactionsProvider struct { + transactionsPreprocessor *transactions + cache storage.Cacher + cacheKey string +} + +func newAdapterGenericCacheToSortedTransactionsProvider(transactionsPreprocessor *transactions, cache storage.Cacher, cacheKey string) *adapterGenericCacheToSortedTransactionsProvider { + adapter := &adapterGenericCacheToSortedTransactionsProvider{ + transactionsPreprocessor: transactionsPreprocessor, + cache: cache, + cacheKey: cacheKey, + } + + return adapter +} + +// GetSortedTransactions gets the transactions from the cache +func (adapter *adapterGenericCacheToSortedTransactionsProvider) GetSortedTransactions() ([]data.TransactionHandler, [][]byte) { + txs, txHashes := adapter.getOrderedTx() + return txs, txHashes +} + +// getOrderedTx was moved here from the previous implementation +func (adapter 
*adapterGenericCacheToSortedTransactionsProvider) getOrderedTx() ([]data.TransactionHandler, [][]byte) { + txs := adapter.transactionsPreprocessor + strCache := adapter.cacheKey + + txs.mutOrderedTxs.RLock() + orderedTxs := txs.orderedTxs[strCache] + orderedTxHashes := txs.orderedTxHashes[strCache] + txs.mutOrderedTxs.RUnlock() + + alreadyOrdered := len(orderedTxs) > 0 + if !alreadyOrdered { + orderedTxs, orderedTxHashes = sortTxByNonce(adapter.cache) + + log.Debug("creating mini blocks has been started", + "have num txs", len(orderedTxs), + "strCache", strCache, + ) + + txs.mutOrderedTxs.Lock() + txs.orderedTxs[strCache] = orderedTxs + txs.orderedTxHashes[strCache] = orderedTxHashes + txs.mutOrderedTxs.Unlock() + } + + return orderedTxs, orderedTxHashes +} + +// sortTxByNonce was moved here from the previous implementation +func sortTxByNonce(cache storage.Cacher) ([]data.TransactionHandler, [][]byte) { + txShardPool := cache + + keys := txShardPool.Keys() + transactions := make([]data.TransactionHandler, 0, len(keys)) + txHashes := make([][]byte, 0, len(keys)) + + mTxHashes := make(map[uint64][][]byte, len(keys)) + mTransactions := make(map[uint64][]data.TransactionHandler, len(keys)) + + nonces := make([]uint64, 0, len(keys)) + + for _, key := range keys { + val, _ := txShardPool.Peek(key) + if val == nil { + continue + } + + tx, ok := val.(data.TransactionHandler) + if !ok { + continue + } + + nonce := tx.GetNonce() + if mTxHashes[nonce] == nil { + nonces = append(nonces, nonce) + mTxHashes[nonce] = make([][]byte, 0) + mTransactions[nonce] = make([]data.TransactionHandler, 0) + } + + mTxHashes[nonce] = append(mTxHashes[nonce], key) + mTransactions[nonce] = append(mTransactions[nonce], tx) + } + + sort.Slice(nonces, func(i, j int) bool { + return nonces[i] < nonces[j] + }) + + for _, nonce := range nonces { + keys := mTxHashes[nonce] + + for idx, key := range keys { + txHashes = append(txHashes, key) + transactions = append(transactions, mTransactions[nonce][idx]) + } + } + + return transaction.TrimSliceHandler(transactions), sliceUtil.TrimSliceSliceByte(txHashes) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (adapter *adapterGenericCacheToSortedTransactionsProvider) IsInterfaceNil() bool { + return adapter == nil +} diff --git a/process/block/preprocess/adapterGenericCacheToSortedTransactionsProvider_test.go b/process/block/preprocess/adapterGenericCacheToSortedTransactionsProvider_test.go new file mode 100644 index 00000000000..26ee38b76a7 --- /dev/null +++ b/process/block/preprocess/adapterGenericCacheToSortedTransactionsProvider_test.go @@ -0,0 +1,190 @@ +package preprocess + +import ( + "bytes" + "fmt" + "math/rand" + "reflect" + "sync" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + "github.com/stretchr/testify/require" +) + +var randomizer *rand.Rand +var randomizerMutex sync.Mutex + +func init() { + randomizer = rand.New(rand.NewSource(time.Now().UnixNano())) +} + +func TestSortTxByNonce_EmptyCacherShouldReturnEmpty(t *testing.T) { + t.Parallel() + + cacher, _ := storageUnit.NewCache(storageUnit.LRUCache, 100, 1) + transactions, txHashes := sortTxByNonce(cacher) + + require.Equal(t, 0, len(transactions)) + require.Equal(t, 0, len(txHashes)) +} + +func TestSortTxByNonce_OneTxShouldWork(t *testing.T) { + 
t.Parallel() + + cacher, _ := storageUnit.NewCache(storageUnit.LRUCache, 100, 1) + hash, tx := createRandTx(randomizer) + cacher.HasOrAdd(hash, tx) + transactions, txHashes := sortTxByNonce(cacher) + + require.Equal(t, 1, len(transactions)) + require.Equal(t, 1, len(txHashes)) + require.True(t, hashInSlice(hash, txHashes)) + require.True(t, txInSlice(tx, transactions)) +} + +func createRandTx(rand *rand.Rand) ([]byte, *transaction.Transaction) { + randomizerMutex.Lock() + nonce := rand.Uint64() + randomizerMutex.Unlock() + + tx := &transaction.Transaction{ + Nonce: nonce, + } + + marshalizer := &mock.MarshalizerMock{} + buffTx, _ := marshalizer.Marshal(tx) + hash := mock.HasherMock{}.Compute(string(buffTx)) + + return hash, tx +} + +func hashInSlice(hash []byte, hashes [][]byte) bool { + for _, item := range hashes { + if bytes.Equal(item, hash) { + return true + } + } + return false +} + +func txInSlice(tx *transaction.Transaction, transactions []data.TransactionHandler) bool { + for _, item := range transactions { + if reflect.DeepEqual(tx, item) { + return true + } + } + return false +} + +func TestSortTxByNonce_MoreTransactionsShouldRetSameSize(t *testing.T) { + t.Parallel() + + cache, genTransactions, _ := genCacherTransactionsHashes(100) + transactions, txHashes := sortTxByNonce(cache) + + require.Equal(t, len(genTransactions), len(transactions)) + require.Equal(t, len(genTransactions), len(txHashes)) +} + +func TestSortTxByNonce_MoreTransactionsShouldContainSameElements(t *testing.T) { + t.Parallel() + + cache, genTransactions, genHashes := genCacherTransactionsHashes(100) + transactions, txHashes := sortTxByNonce(cache) + + for i := 0; i < len(genTransactions); i++ { + require.True(t, hashInSlice(genHashes[i], txHashes)) + require.True(t, txInSlice(genTransactions[i], transactions)) + } +} + +func TestSortTxByNonce_MoreTransactionsShouldContainSortedElements(t *testing.T) { + t.Parallel() + + cache, _, _ := genCacherTransactionsHashes(100) + transactions, _ := sortTxByNonce(cache) + lastNonce := uint64(0) + + for i := 0; i < len(transactions); i++ { + tx := transactions[i] + require.True(t, lastNonce <= tx.GetNonce()) + fmt.Println(tx.GetNonce()) + lastNonce = tx.GetNonce() + } +} + +func TestSortTxByNonce_TransactionsWithSameNonceShouldGetSorted(t *testing.T) { + t.Parallel() + + transactions := []*transaction.Transaction{ + {Nonce: 1, Signature: []byte("sig1")}, + {Nonce: 2, Signature: []byte("sig2")}, + {Nonce: 1, Signature: []byte("sig3")}, + {Nonce: 2, Signature: []byte("sig4")}, + {Nonce: 3, Signature: []byte("sig5")}, + } + + cache, _ := storageUnit.NewCache(storageUnit.LRUCache, uint32(len(transactions)), 1) + + for _, tx := range transactions { + marshalizer := &mock.MarshalizerMock{} + buffTx, _ := marshalizer.Marshal(tx) + hash := mock.HasherMock{}.Compute(string(buffTx)) + + cache.Put(hash, tx) + } + + sortedTxs, _ := sortTxByNonce(cache) + lastNonce := uint64(0) + for i := 0; i < len(sortedTxs); i++ { + tx := sortedTxs[i] + require.True(t, lastNonce <= tx.GetNonce()) + lastNonce = tx.GetNonce() + } + + require.Equal(t, len(sortedTxs), len(transactions)) + + //test if one transaction from transactions might not be in sortedTx + for _, tx := range transactions { + found := false + for _, stx := range sortedTxs { + if reflect.DeepEqual(tx, stx) { + found = true + break + } + } + if !found { + require.Fail(t, "Not found tx in sorted slice for sig: "+string(tx.Signature)) + } + } +} + +func BenchmarkSortTxByNonce1(b *testing.B) { + cache, _, _ := 
genCacherTransactionsHashes(10000) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = sortTxByNonce(cache) + } +} + +func genCacherTransactionsHashes(noOfTx int) (storage.Cacher, []*transaction.Transaction, [][]byte) { + cacher, _ := storageUnit.NewCache(storageUnit.LRUCache, uint32(noOfTx), 1) + genHashes := make([][]byte, 0) + genTransactions := make([]*transaction.Transaction, 0) + + for i := 0; i < noOfTx; i++ { + hash, tx := createRandTx(randomizer) + cacher.HasOrAdd(hash, tx) + + genHashes = append(genHashes, hash) + genTransactions = append(genTransactions, tx) + } + + return cacher, genTransactions, genHashes +} diff --git a/process/block/preprocess/adapterTxCacheToSortedTransactionsProvider.go b/process/block/preprocess/adapterTxCacheToSortedTransactionsProvider.go new file mode 100644 index 00000000000..9c3e38de75a --- /dev/null +++ b/process/block/preprocess/adapterTxCacheToSortedTransactionsProvider.go @@ -0,0 +1,30 @@ +package preprocess + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/storage/txcache" +) + +type adapterTxCacheToSortedTransactionsProvider struct { + txCache *txcache.TxCache +} + +func newAdapterTxCacheToSortedTransactionsProvider(txCache *txcache.TxCache) *adapterTxCacheToSortedTransactionsProvider { + adapter := &adapterTxCacheToSortedTransactionsProvider{ + txCache: txCache, + } + + return adapter +} + +// GetSortedTransactions gets the transactions from the cache +func (adapter *adapterTxCacheToSortedTransactionsProvider) GetSortedTransactions() ([]data.TransactionHandler, [][]byte) { + txs, txHashes := adapter.txCache.GetTransactions(process.MaxItemsInBlock, process.NumTxPerSenderBatchForFillingMiniblock) + return txs, txHashes +} + +// IsInterfaceNil returns true if there is no value under the interface +func (adapter *adapterTxCacheToSortedTransactionsProvider) IsInterfaceNil() bool { + return adapter == nil +} diff --git a/process/block/preprocess/basePreProcess.go b/process/block/preprocess/basePreProcess.go index 8f0194d49ee..b28fd6b112f 100644 --- a/process/block/preprocess/basePreProcess.go +++ b/process/block/preprocess/basePreProcess.go @@ -137,7 +137,9 @@ func (bpp *basePreProcess) baseReceivedTransaction( txHash []byte, forBlock *txsForBlock, txPool dataRetriever.ShardedDataCacherNotifier, + blockType block.Type, ) bool { + searchFirst := blockType == block.InvalidBlock forBlock.mutTxsForBlock.Lock() if forBlock.missingTxs > 0 { @@ -148,7 +150,8 @@ func (bpp *basePreProcess) baseReceivedTransaction( txInfoForHash.senderShardID, txInfoForHash.receiverShardID, txHash, - txPool) + txPool, + searchFirst) if tx != nil { forBlock.txHashAndInfo[string(txHash)].tx = tx @@ -173,6 +176,7 @@ func (bpp *basePreProcess) computeExistingAndMissing( txPool dataRetriever.ShardedDataCacherNotifier, ) map[uint32][]*txsHashesInfo { + searchFirst := currType == block.InvalidBlock missingTxsForShard := make(map[uint32][]*txsHashesInfo, len(body)) txHashes := make([][]byte, 0, initialTxHashesSliceLen) forBlock.mutTxsForBlock.Lock() @@ -190,7 +194,8 @@ func (bpp *basePreProcess) computeExistingAndMissing( miniBlock.SenderShardID, miniBlock.ReceiverShardID, txHash, - txPool) + txPool, + searchFirst) if err != nil { txHashes = append(txHashes, txHash) diff --git a/process/block/preprocess/interfaces.go b/process/block/preprocess/interfaces.go new file mode 100644 index 00000000000..ef3c2195b7c --- /dev/null +++ b/process/block/preprocess/interfaces.go @@ -0,0 +1,9 @@ +package 
preprocess + +import "github.com/ElrondNetwork/elrond-go/data" + +// SortedTransactionsProvider defines the public API of the transactions cache +type SortedTransactionsProvider interface { + GetSortedTransactions() ([]data.TransactionHandler, [][]byte) + IsInterfaceNil() bool +} diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index 6320e007348..7a0c3f25f74 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -293,7 +293,7 @@ func (rtp *rewardTxPreprocessor) SaveTxBlockToStorage(body block.Body) error { // receivedRewardTransaction is a callback function called when a new reward transaction // is added in the reward transactions pool func (rtp *rewardTxPreprocessor) receivedRewardTransaction(txHash []byte) { - receivedAllMissing := rtp.baseReceivedTransaction(txHash, &rtp.rewardTxsForBlock, rtp.rewardTxPool) + receivedAllMissing := rtp.baseReceivedTransaction(txHash, &rtp.rewardTxsForBlock, rtp.rewardTxPool, block.RewardsBlock) if receivedAllMissing { rtp.chReceivedAllRewardTxs <- true @@ -415,6 +415,7 @@ func (rtp *rewardTxPreprocessor) computeMissingRewardTxsForMiniBlock(miniBlock * miniBlock.ReceiverShardID, txHash, rtp.rewardTxPool, + false, ) if tx == nil { @@ -462,16 +463,6 @@ func (rtp *rewardTxPreprocessor) getAllRewardTxsFromMiniBlock( return rewardTxs, txHashes, nil } -// CreateAndProcessMiniBlock creates the miniblock from storage and processes the reward transactions added into the miniblock -func (rtp *rewardTxPreprocessor) CreateAndProcessMiniBlock( - _, _ uint32, - _ int, - _ func() bool, -) (*block.MiniBlock, error) { - - return nil, nil -} - // CreateAndProcessMiniBlocks creates miniblocks from storage and processes the reward transactions added into the miniblocks // as long as it has time func (rtp *rewardTxPreprocessor) CreateAndProcessMiniBlocks( diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go index 75199805dbe..008f6c09b71 100644 --- a/process/block/preprocess/rewardsHandler.go +++ b/process/block/preprocess/rewardsHandler.go @@ -30,10 +30,11 @@ type rewardsHandler struct { protocolRewardsMeta []data.TransactionHandler feeRewards []data.TransactionHandler - mut sync.Mutex - accumulatedFees *big.Int - rewardTxsForBlock map[string]*rewardTx.RewardTx - economicsRewards process.RewardsHandler + mut sync.Mutex + accumulatedFees *big.Int + rewardTxsForBlock map[string]*rewardTx.RewardTx + economicsRewards process.RewardsHandler + intraShardMiniBlock *block.MiniBlock } // NewRewardTxHandler constructor for the reward transaction handler @@ -160,6 +161,10 @@ func (rtxh *rewardsHandler) CreateAllInterMiniBlocks() map[uint32]*block.MiniBlo miniBlocks := rtxh.miniblocksFromRewardTxs(calculatedRewardTxs) + if _, ok := miniBlocks[rtxh.shardCoordinator.SelfId()]; ok { + rtxh.intraShardMiniBlock = miniBlocks[rtxh.shardCoordinator.SelfId()].Clone() + } + return miniBlocks } @@ -216,6 +221,17 @@ func (rtxh *rewardsHandler) miniblocksFromRewardTxs( return miniBlocks } +func (rtxh *rewardsHandler) GetCreatedInShardMiniBlock() *block.MiniBlock { + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + if rtxh.intraShardMiniBlock == nil { + return nil + } + + return rtxh.intraShardMiniBlock.Clone() +} + // VerifyInterMiniBlocks verifies if transaction fees were correctly handled for the block func (rtxh *rewardsHandler) VerifyInterMiniBlocks(_ block.Body) error { err := rtxh.verifyCreatedRewardsTxs() @@ -266,6 
+282,7 @@ func (rtxh *rewardsHandler) cleanCachedData() { rtxh.mut.Lock() rtxh.accumulatedFees = big.NewInt(0) rtxh.rewardTxsForBlock = make(map[string]*rewardTx.RewardTx) + rtxh.intraShardMiniBlock = nil rtxh.mut.Unlock() rtxh.mutGenRewardTxs.Lock() @@ -273,6 +290,7 @@ func (rtxh *rewardsHandler) cleanCachedData() { rtxh.protocolRewards = make([]data.TransactionHandler, 0) rtxh.protocolRewardsMeta = make([]data.TransactionHandler, 0) rtxh.mutGenRewardTxs.Unlock() + } func getPercentageOfValue(value *big.Int, percentage float64) *big.Int { @@ -474,8 +492,5 @@ func (rtxh *rewardsHandler) GetAllCurrentFinishedTxs() map[string]data.Transacti // IsInterfaceNil returns true if there is no value under the interface func (rtxh *rewardsHandler) IsInterfaceNil() bool { - if rtxh == nil { - return true - } - return false + return rtxh == nil } diff --git a/process/block/preprocess/smartContractResults.go b/process/block/preprocess/smartContractResults.go index 93665adbf51..8a05d5f420e 100644 --- a/process/block/preprocess/smartContractResults.go +++ b/process/block/preprocess/smartContractResults.go @@ -270,7 +270,7 @@ func (scr *smartContractResults) SaveTxBlockToStorage(body block.Body) error { // receivedSmartContractResult is a call back function which is called when a new smartContractResult // is added in the smartContractResult pool func (scr *smartContractResults) receivedSmartContractResult(txHash []byte) { - receivedAllMissing := scr.baseReceivedTransaction(txHash, &scr.scrForBlock, scr.scrPool) + receivedAllMissing := scr.baseReceivedTransaction(txHash, &scr.scrForBlock, scr.scrPool, block.SmartContractResultBlock) if receivedAllMissing { scr.chRcvAllScrs <- true @@ -389,7 +389,8 @@ func (scr *smartContractResults) computeMissingScrsForMiniBlock(miniBlock *block miniBlock.SenderShardID, miniBlock.ReceiverShardID, txHash, - scr.scrPool) + scr.scrPool, + false) if tx == nil || tx.IsInterfaceNil() { missingSmartContractResults = append(missingSmartContractResults, txHash) @@ -436,16 +437,6 @@ func (scr *smartContractResults) getAllScrsFromMiniBlock( return smartContractResult.TrimSlicePtr(smartContractResults), sliceUtil.TrimSliceSliceByte(txHashes), nil } -// CreateAndProcessMiniBlock creates the miniblock from storage and processes the smartContractResults added into the miniblock -func (scr *smartContractResults) CreateAndProcessMiniBlock( - _, _ uint32, - _ int, - _ func() bool, -) (*block.MiniBlock, error) { - - return nil, nil -} - // CreateAndProcessMiniBlocks creates miniblocks from storage and processes the reward transactions added into the miniblocks // as long as it has time func (scr *smartContractResults) CreateAndProcessMiniBlocks( diff --git a/process/block/preprocess/smartContractResults_test.go b/process/block/preprocess/smartContractResults_test.go index 2ea5ce3cacf..a742ca122cb 100644 --- a/process/block/preprocess/smartContractResults_test.go +++ b/process/block/preprocess/smartContractResults_test.go @@ -50,7 +50,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilStore(t *testing. 
tdp := initDataPool() requestTransaction := func(shardID uint32, txHashes [][]byte) {} txs, err := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), nil, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -72,7 +72,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilHasher(t *testing tdp := initDataPool() requestTransaction := func(shardID uint32, txHashes [][]byte) {} txs, err := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, nil, &mock.MarshalizerMock{}, @@ -94,7 +94,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilMarsalizer(t *tes tdp := initDataPool() requestTransaction := func(shardID uint32, txHashes [][]byte) {} txs, err := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, &mock.HasherMock{}, nil, @@ -116,7 +116,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilTxProce(t *testin tdp := initDataPool() requestTransaction := func(shardID uint32, txHashes [][]byte) {} txs, err := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -138,7 +138,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilShardCoord(t *tes tdp := initDataPool() requestTransaction := func(shardID uint32, txHashes [][]byte) {} txs, err := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -160,7 +160,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilAccounts(t *testi tdp := initDataPool() requestTransaction := func(shardID uint32, txHashes [][]byte) {} txs, err := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -181,7 +181,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilRequestFunc(t *te tdp := initDataPool() txs, err := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -203,7 +203,7 @@ func TestScrsPreprocessor_NewSmartContractResultPreprocessorNilGasHandler(t *tes tdp := initDataPool() requestTransaction := func(shardID uint32, txHashes [][]byte) {} txs, err := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -238,7 +238,7 @@ func TestScrsPreProcessor_GetTransactionFromPool(t *testing.T) { ) txHash := []byte("tx1_hash") - tx, _ := process.GetTransactionHandlerFromPool(1, 1, txHash, tdp.UnsignedTransactions()) + tx, _ := process.GetTransactionHandlerFromPool(1, 1, txHash, tdp.UnsignedTransactions(), false) assert.NotNil(t, txs) assert.NotNil(t, tx) assert.Equal(t, uint64(10), tx.(*smartContractResult.SmartContractResult).Nonce) @@ -251,7 +251,7 @@ func TestScrsPreprocessor_RequestTransactionNothingToRequestAsGeneratedAtProcess requestTransaction := func(shardID uint32, txHashes [][]byte) {} shardCoord := mock.NewMultiShardsCoordinatorMock(3) txs, _ := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -285,7 +285,7 @@ func TestScrsPreprocessor_RequestTransactionFromNetwork(t *testing.T) { 
requestTransaction := func(shardID uint32, txHashes [][]byte) {} shardCoord := mock.NewMultiShardsCoordinatorMock(3) txs, _ := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -318,7 +318,7 @@ func TestScrsPreprocessor_RequestBlockTransactionFromMiniBlockFromNetwork(t *tes tdp := initDataPool() requestTransaction := func(shardID uint32, txHashes [][]byte) {} txs, _ := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -471,7 +471,7 @@ func TestScrsPreprocessor_RemoveBlockTxsFromPoolNilBlockShouldErr(t *testing.T) tdp := initDataPool() requestTransaction := func(shardID uint32, txHashes [][]byte) {} txs, _ := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -495,7 +495,7 @@ func TestScrsPreprocessor_RemoveBlockTxsFromPoolOK(t *testing.T) { tdp := initDataPool() requestTransaction := func(shardID uint32, txHashes [][]byte) {} txs, _ := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -532,7 +532,7 @@ func TestScrsPreprocessor_IsDataPreparedErr(t *testing.T) { requestTransaction := func(shardID uint32, txHashes [][]byte) {} txs, _ := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -556,7 +556,7 @@ func TestScrsPreprocessor_IsDataPrepared(t *testing.T) { requestTransaction := func(shardID uint32, txHashes [][]byte) {} txs, _ := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -585,7 +585,7 @@ func TestScrsPreprocessor_SaveTxBlockToStorage(t *testing.T) { requestTransaction := func(shardID uint32, txHashes [][]byte) {} txs, _ := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -621,7 +621,7 @@ func TestScrsPreprocessor_SaveTxBlockToStorageMissingTransactionsShouldErr(t *te tdp := initDataPool() requestTransaction := func(shardID uint32, txHashes [][]byte) {} txs, _ := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -659,7 +659,7 @@ func TestScrsPreprocessor_ProcessBlockTransactions(t *testing.T) { tdp := initDataPool() requestTransaction := func(shardID uint32, txHashes [][]byte) {} scr, _ := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -694,7 +694,7 @@ func TestScrsPreprocessor_ProcessBlockTransactions(t *testing.T) { txshardInfo := txShardInfo{0, 0} smartcr := smartContractResult.SmartContractResult{ Nonce: 1, - Data: "tx", + Data: []byte("tx"), } scr.scrForBlock.txHashAndInfo["txHash"] = &txInfo{&smartcr, &txshardInfo} @@ -728,7 +728,7 @@ func TestScrsPreprocessor_ProcessMiniBlock(t *testing.T) { requestTransaction := func(shardID uint32, txHashes [][]byte) {} scr, _ := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, 
&mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -770,7 +770,7 @@ func TestScrsPreprocessor_ProcessMiniBlockWrongTypeMiniblockShouldErr(t *testing requestTransaction := func(shardID uint32, txHashes [][]byte) {} scr, _ := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, @@ -877,7 +877,7 @@ func TestScrsPreprocessor__RestoreTxBlockIntoPoolsNilMiniblockPoolShouldErr(t *t requestTransaction := func(shardID uint32, txHashes [][]byte) {} scr, _ := NewSmartContractResultPreprocessor( - tdp.Transactions(), + tdp.UnsignedTransactions(), &mock.ChainStorerMock{}, &mock.HasherMock{}, &mock.MarshalizerMock{}, diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 01638e8dde4..7e8c7c41121 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -1,8 +1,7 @@ package preprocess import ( - "fmt" - "sort" + "errors" "sync" "time" @@ -35,10 +34,11 @@ type transactions struct { storage dataRetriever.StorageService txProcessor process.TransactionProcessor accounts state.AccountsAdapter - orderedTxs map[string][]*transaction.Transaction + orderedTxs map[string][]data.TransactionHandler orderedTxHashes map[string][][]byte mutOrderedTxs sync.RWMutex miniBlocksCompacter process.MiniBlocksCompacter + blockType block.Type } // NewTransactionPreprocessor creates a new transaction preprocessor object @@ -54,6 +54,7 @@ func NewTransactionPreprocessor( economicsFee process.FeeHandler, miniBlocksCompacter process.MiniBlocksCompacter, gasHandler process.GasHandler, + blockType block.Type, ) (*transactions, error) { if check.IfNil(hasher) { @@ -106,13 +107,14 @@ func NewTransactionPreprocessor( txProcessor: txProcessor, accounts: accounts, miniBlocksCompacter: miniBlocksCompacter, + blockType: blockType, } txs.chRcvAllTxs = make(chan bool) txs.txPool.RegisterHandler(txs.receivedTransaction) txs.txsForCurrBlock.txHashAndInfo = make(map[string]*txInfo) - txs.orderedTxs = make(map[string][]*transaction.Transaction) + txs.orderedTxs = make(map[string][]data.TransactionHandler) txs.orderedTxHashes = make(map[string][][]byte) return &txs, nil @@ -157,7 +159,7 @@ func (txs *transactions) RemoveTxBlockFromPools(body block.Body, miniBlockPool s return process.ErrNilMiniBlockPool } - err := txs.removeDataFromPools(body, miniBlockPool, txs.txPool, block.TxBlock) + err := txs.removeDataFromPools(body, miniBlockPool, txs.txPool, txs.blockType) return err } @@ -221,7 +223,7 @@ func (txs *transactions) ProcessBlockTransactions( // basic validation already done in interceptors for i := 0; i < len(expandedMiniBlocks); i++ { miniBlock := expandedMiniBlocks[i] - if miniBlock.Type != block.TxBlock { + if miniBlock.Type != txs.blockType { continue } @@ -255,7 +257,7 @@ func (txs *transactions) ProcessBlockTransactions( miniBlock.ReceiverShardID, ) - if err != nil { + if err != nil && !errors.Is(err, process.ErrFailedTransaction) { return err } @@ -296,7 +298,7 @@ func (txs *transactions) SaveTxBlockToStorage(body block.Body) error { // receivedTransaction is a call back function which is called when a new transaction // is added in the transaction pool func (txs *transactions) receivedTransaction(txHash []byte) { - receivedAllMissing := txs.baseReceivedTransaction(txHash, &txs.txsForCurrBlock, txs.txPool) + receivedAllMissing := txs.baseReceivedTransaction(txHash, &txs.txsForCurrBlock, txs.txPool, txs.blockType) if receivedAllMissing { 
txs.chRcvAllTxs <- true @@ -313,7 +315,7 @@ func (txs *transactions) CreateBlockStarted() { txs.txsForCurrBlock.mutTxsForBlock.Unlock() txs.mutOrderedTxs.Lock() - txs.orderedTxs = make(map[string][]*transaction.Transaction) + txs.orderedTxs = make(map[string][]data.TransactionHandler) txs.orderedTxHashes = make(map[string][][]byte) txs.mutOrderedTxs.Unlock() } @@ -354,7 +356,7 @@ func (txs *transactions) computeMissingAndExistingTxsForShards(body block.Body) body, &txs.txsForCurrBlock, txs.chRcvAllTxs, - block.TxBlock, + txs.blockType, txs.txPool) return missingTxsForShard @@ -375,7 +377,7 @@ func (txs *transactions) processAndRemoveBadTransaction( txs.txPool.RemoveData(transactionHash, strCache) } - if err != nil { + if err != nil && !errors.Is(err, process.ErrFailedTransaction) { return err } @@ -384,7 +386,7 @@ func (txs *transactions) processAndRemoveBadTransaction( txs.txsForCurrBlock.txHashAndInfo[string(transactionHash)] = &txInfo{tx: transaction, txShardInfo: txShardInfo} txs.txsForCurrBlock.mutTxsForBlock.Unlock() - return nil + return err } // RequestTransactionsForMiniBlock requests missing transactions for a certain miniblock @@ -403,17 +405,20 @@ func (txs *transactions) RequestTransactionsForMiniBlock(miniBlock *block.MiniBl // computeMissingTxsForMiniBlock computes missing transactions for a certain miniblock func (txs *transactions) computeMissingTxsForMiniBlock(miniBlock *block.MiniBlock) [][]byte { - if miniBlock.Type != block.TxBlock { + if miniBlock.Type != txs.blockType { return nil } missingTransactions := make([][]byte, 0, len(miniBlock.TxHashes)) + searchFirst := txs.blockType == block.InvalidBlock + for _, txHash := range miniBlock.TxHashes { tx, _ := process.GetTransactionHandlerFromPool( miniBlock.SenderShardID, miniBlock.ReceiverShardID, txHash, - txs.txPool) + txs.txPool, + searchFirst) if tx == nil || tx.IsInterfaceNil() { missingTransactions = append(missingTransactions, txHash) @@ -471,7 +476,7 @@ func (txs *transactions) CreateAndProcessMiniBlocks( newMBAdded := true txSpaceRemained := int(maxTxSpaceRemained) - miniBlock, err := txs.CreateAndProcessMiniBlock( + miniBlock, err := txs.createAndProcessMiniBlock( txs.shardCoordinator.SelfId(), sharding.MetachainShardId, txSpaceRemained, @@ -498,7 +503,7 @@ func (txs *transactions) CreateAndProcessMiniBlocks( break } - miniBlock, err := txs.CreateAndProcessMiniBlock( + miniBlock, err := txs.createAndProcessMiniBlock( txs.shardCoordinator.SelfId(), shardId, txSpaceRemained, @@ -522,15 +527,15 @@ func (txs *transactions) CreateAndProcessMiniBlocks( } // CreateAndProcessMiniBlock creates the miniblock from storage and processes the transactions added into the miniblock -func (txs *transactions) CreateAndProcessMiniBlock( +func (txs *transactions) createAndProcessMiniBlock( senderShardId uint32, receiverShardId uint32, spaceRemained int, haveTime func() bool, ) (*block.MiniBlock, error) { - - var orderedTxs []*transaction.Transaction - var orderedTxHashes [][]byte + if txs.blockType != block.TxBlock { + return &block.MiniBlock{}, nil + } timeBefore := time.Now() orderedTxs, orderedTxHashes, err := txs.computeOrderedTxs(senderShardId, receiverShardId) @@ -565,11 +570,15 @@ func (txs *transactions) CreateAndProcessMiniBlock( gasConsumedByMiniBlockInReceiverShard := uint64(0) for index := range orderedTxs { + txHandler := orderedTxs[index] + tx := txHandler.(*transaction.Transaction) + txHash := orderedTxHashes[index] + if !haveTime() { break } - if txs.isTxAlreadyProcessed(orderedTxHashes[index], &txs.txsForCurrBlock) 
{ + if txs.isTxAlreadyProcessed(txHash, &txs.txsForCurrBlock) { continue } @@ -580,8 +589,8 @@ func (txs *transactions) CreateAndProcessMiniBlock( err = txs.computeGasConsumed( miniBlock.SenderShardID, miniBlock.ReceiverShardID, - orderedTxs[index], - orderedTxHashes[index], + tx, + txHash, &gasConsumedByMiniBlockInSenderShard, &gasConsumedByMiniBlockInReceiverShard) @@ -591,16 +600,16 @@ func (txs *transactions) CreateAndProcessMiniBlock( // execute transaction to change the trie root hash err = txs.processAndRemoveBadTransaction( - orderedTxHashes[index], - orderedTxs[index], + txHash, + tx, miniBlock.SenderShardID, miniBlock.ReceiverShardID, ) - if err != nil { + if err != nil && !errors.Is(err, process.ErrFailedTransaction) { log.Trace("bad tx", "error", err.Error(), - "hash", orderedTxHashes[index], + "hash", txHash, ) err = txs.accounts.RevertToSnapshot(snapshot) @@ -608,8 +617,8 @@ func (txs *transactions) CreateAndProcessMiniBlock( log.Debug("revert to snapshot", "error", err.Error()) } - txs.gasHandler.RemoveGasConsumed([][]byte{orderedTxHashes[index]}) - txs.gasHandler.RemoveGasRefunded([][]byte{orderedTxHashes[index]}) + txs.gasHandler.RemoveGasConsumed([][]byte{txHash}) + txs.gasHandler.RemoveGasRefunded([][]byte{txHash}) gasConsumedByMiniBlockInSenderShard = oldGasConsumedByMiniBlockInSenderShard gasConsumedByMiniBlockInReceiverShard = oldGasConsumedByMiniBlockInReceiverShard @@ -617,13 +626,15 @@ func (txs *transactions) CreateAndProcessMiniBlock( continue } - gasRefunded := txs.gasHandler.GasRefunded(orderedTxHashes[index]) + gasRefunded := txs.gasHandler.GasRefunded(txHash) gasConsumedByMiniBlockInReceiverShard -= gasRefunded if senderShardId == receiverShardId { gasConsumedByMiniBlockInSenderShard -= gasRefunded } - miniBlock.TxHashes = append(miniBlock.TxHashes, orderedTxHashes[index]) + if !errors.Is(err, process.ErrFailedTransaction) { + miniBlock.TxHashes = append(miniBlock.TxHashes, txHash) + } addedTxs++ if addedTxs >= spaceRemained { // max transactions count in one block was reached @@ -632,23 +643,24 @@ func (txs *transactions) CreateAndProcessMiniBlock( "total txs", len(orderedTxs), ) - log.Debug(fmt.Sprintf("gas consumed: %d in mini block in sender shard, %d in mini block in receiver shard, %d in block in self shard: added %d txs from %d txs\n", gasConsumedByMiniBlockInSenderShard, - gasConsumedByMiniBlockInReceiverShard, - txs.gasHandler.TotalGasConsumed(), - len(miniBlock.TxHashes), - len(orderedTxs))) + log.Debug("mini block info", + "gas consumed in sender shard", gasConsumedByMiniBlockInSenderShard, + "gas consumed in receiver shard", gasConsumedByMiniBlockInReceiverShard, + "gas consumed in self shard", txs.gasHandler.TotalGasConsumed(), + "txs ordered", len(orderedTxs), + "txs added", len(miniBlock.TxHashes)) return miniBlock, nil } } if addedTxs > 0 { - log.Debug(fmt.Sprintf("gas consumed: %d in mini block in sender shard, %d in mini block in receiver shard, %d in block in self shard: added %d txs from %d txs\n", - gasConsumedByMiniBlockInSenderShard, - gasConsumedByMiniBlockInReceiverShard, - txs.gasHandler.TotalGasConsumed(), - len(miniBlock.TxHashes), - len(orderedTxs))) + log.Debug("mini block info", + "gas consumed in sender shard", gasConsumedByMiniBlockInSenderShard, + "gas consumed in receiver shard", gasConsumedByMiniBlockInReceiverShard, + "gas consumed in self shard", txs.gasHandler.TotalGasConsumed(), + "txs ordered", len(orderedTxs), + "txs added", len(miniBlock.TxHashes)) } return miniBlock, nil @@ -657,10 +669,7 @@ func (txs *transactions) 
CreateAndProcessMiniBlock( func (txs *transactions) computeOrderedTxs( sndShardId uint32, dstShardId uint32, -) ([]*transaction.Transaction, [][]byte, error) { - - var err error - +) ([]data.TransactionHandler, [][]byte, error) { strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) txShardPool := txs.txPool.ShardDataStore(strCache) @@ -671,31 +680,9 @@ func (txs *transactions) computeOrderedTxs( return nil, nil, process.ErrEmptyTxDataPool } - txs.mutOrderedTxs.RLock() - orderedTxs := txs.orderedTxs[strCache] - orderedTxHashes := txs.orderedTxHashes[strCache] - txs.mutOrderedTxs.RUnlock() - - alreadyOrdered := len(orderedTxs) > 0 - if !alreadyOrdered { - orderedTxs, orderedTxHashes, err = SortTxByNonce(txShardPool) - if err != nil { - return nil, nil, err - } - - log.Debug("creating mini blocks has been started", - "have num txs", len(orderedTxs), - "snd shard", sndShardId, - "dest shard", dstShardId, - ) - - txs.mutOrderedTxs.Lock() - txs.orderedTxs[strCache] = orderedTxs - txs.orderedTxHashes[strCache] = orderedTxHashes - txs.mutOrderedTxs.Unlock() - } - - return orderedTxs, orderedTxHashes, nil + sortedTransactionsProvider := createSortedTransactionsProvider(txs, txShardPool, strCache) + sortedTxs, sortedTxsHashes := sortedTransactionsProvider.GetSortedTransactions() + return sortedTxs, sortedTxsHashes, nil } // ProcessMiniBlock processes all the transactions from a and saves the processed transactions in local cache complete miniblock @@ -703,6 +690,9 @@ func (txs *transactions) ProcessMiniBlock( miniBlock *block.MiniBlock, haveTime func() bool, ) error { + if txs.blockType != block.TxBlock { + return nil + } if miniBlock.Type != block.TxBlock { return process.ErrWrongTypeInMiniBlock @@ -769,58 +759,6 @@ func (txs *transactions) ProcessMiniBlock( return nil } -// SortTxByNonce sort transactions according to nonces -func SortTxByNonce(txShardPool storage.Cacher) ([]*transaction.Transaction, [][]byte, error) { - if txShardPool == nil { - return nil, nil, process.ErrNilTxDataPool - } - - keys := txShardPool.Keys() - transactions := make([]*transaction.Transaction, 0, len(keys)) - txHashes := make([][]byte, 0, len(keys)) - - mTxHashes := make(map[uint64][][]byte, len(keys)) - mTransactions := make(map[uint64][]*transaction.Transaction, len(keys)) - - nonces := make([]uint64, 0, len(keys)) - - for _, key := range keys { - val, _ := txShardPool.Peek(key) - if val == nil { - continue - } - - tx, ok := val.(*transaction.Transaction) - if !ok { - continue - } - - if mTxHashes[tx.Nonce] == nil { - nonces = append(nonces, tx.Nonce) - mTxHashes[tx.Nonce] = make([][]byte, 0) - mTransactions[tx.Nonce] = make([]*transaction.Transaction, 0) - } - - mTxHashes[tx.Nonce] = append(mTxHashes[tx.Nonce], key) - mTransactions[tx.Nonce] = append(mTransactions[tx.Nonce], tx) - } - - sort.Slice(nonces, func(i, j int) bool { - return nonces[i] < nonces[j] - }) - - for _, nonce := range nonces { - keys := mTxHashes[nonce] - - for idx, key := range keys { - txHashes = append(txHashes, key) - transactions = append(transactions, mTransactions[nonce][idx]) - } - } - - return transaction.TrimSlicePtr(transactions), sliceUtil.TrimSliceSliceByte(txHashes), nil -} - // CreateMarshalizedData marshalizes transactions and creates and saves them into a new structure func (txs *transactions) CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) { mrsScrs, err := txs.createMarshalizedData(txHashes, &txs.txsForCurrBlock) diff --git a/process/block/preprocess/transactions_test.go 
b/process/block/preprocess/transactions_test.go index 3e3fdb8ea5f..b3719952a03 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -3,13 +3,9 @@ package preprocess import ( "bytes" "encoding/hex" - "fmt" "math/big" - "math/rand" "reflect" - "sync" "testing" - "time" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/crypto/signing" @@ -20,7 +16,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" @@ -143,9 +139,6 @@ func initDataPool() *mock.PoolsHolderStub { }, } }, - HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { - return &mock.Uint64SyncMapCacherStub{} - }, MetaBlocksCalled: func() storage.Cacher { return &mock.CacherStub{ GetCalled: func(key []byte) (value interface{}, ok bool) { @@ -191,9 +184,9 @@ func initDataPool() *mock.PoolsHolderStub { cs.RemoveCalled = func(key []byte) {} return cs }, - HeadersCalled: func() storage.Cacher { - cs := &mock.CacherStub{} - cs.RegisterHandlerCalled = func(i func(key []byte)) { + HeadersCalled: func() dataRetriever.HeadersPool { + cs := &mock.HeadersCacherStub{} + cs.RegisterHandlerCalled = func(i func(header data.HeaderHandler, key []byte)) { } return cs }, @@ -217,6 +210,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilPool(t *testing.T) { feeHandlerMock(), miniBlocksCompacterMock(), &mock.GasHandlerMock{}, + block.TxBlock, ) assert.Nil(t, txs) @@ -240,6 +234,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilStore(t *testing.T) { feeHandlerMock(), miniBlocksCompacterMock(), &mock.GasHandlerMock{}, + block.TxBlock, ) assert.Nil(t, txs) @@ -263,6 +258,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilHasher(t *testing.T) { feeHandlerMock(), miniBlocksCompacterMock(), &mock.GasHandlerMock{}, + block.TxBlock, ) assert.Nil(t, txs) @@ -286,6 +282,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilMarsalizer(t *testing.T) { feeHandlerMock(), miniBlocksCompacterMock(), &mock.GasHandlerMock{}, + block.TxBlock, ) assert.Nil(t, txs) @@ -309,6 +306,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilTxProce(t *testing.T) { feeHandlerMock(), miniBlocksCompacterMock(), &mock.GasHandlerMock{}, + block.TxBlock, ) assert.Nil(t, txs) @@ -332,6 +330,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilShardCoord(t *testing.T) { feeHandlerMock(), miniBlocksCompacterMock(), &mock.GasHandlerMock{}, + block.TxBlock, ) assert.Nil(t, txs) @@ -355,6 +354,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilAccounts(t *testing.T) { feeHandlerMock(), miniBlocksCompacterMock(), &mock.GasHandlerMock{}, + block.TxBlock, ) assert.Nil(t, txs) @@ -377,6 +377,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilRequestFunc(t *testing.T) feeHandlerMock(), miniBlocksCompacterMock(), &mock.GasHandlerMock{}, + block.TxBlock, ) assert.Nil(t, txs) @@ -400,6 +401,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilFeeHandler(t *testing.T) { nil, miniBlocksCompacterMock(), &mock.GasHandlerMock{}, + block.TxBlock, ) assert.Nil(t, txs) @@ -423,6 +425,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilMiniBlocksCompacter(t *tes 
feeHandlerMock(), nil, &mock.GasHandlerMock{}, + block.TxBlock, ) assert.Nil(t, txs) @@ -446,6 +449,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilGasHandler(t *testing.T) { feeHandlerMock(), miniBlocksCompacterMock(), nil, + block.TxBlock, ) assert.Nil(t, txs) @@ -468,9 +472,10 @@ func TestTxsPreProcessor_GetTransactionFromPool(t *testing.T) { feeHandlerMock(), miniBlocksCompacterMock(), &mock.GasHandlerMock{}, + block.TxBlock, ) txHash := []byte("tx1_hash") - tx, _ := process.GetTransactionHandlerFromPool(1, 1, txHash, tdp.Transactions()) + tx, _ := process.GetTransactionHandlerFromPool(1, 1, txHash, tdp.Transactions(), false) assert.NotNil(t, txs) assert.NotNil(t, tx) assert.Equal(t, uint64(10), tx.(*transaction.Transaction).Nonce) @@ -492,6 +497,7 @@ func TestTransactionPreprocessor_RequestTransactionFromNetwork(t *testing.T) { feeHandlerMock(), miniBlocksCompacterMock(), &mock.GasHandlerMock{}, + block.TxBlock, ) shardId := uint32(1) txHash1 := []byte("tx_hash1") @@ -522,6 +528,7 @@ func TestTransactionPreprocessor_RequestBlockTransactionFromMiniBlockFromNetwork feeHandlerMock(), miniBlocksCompacterMock(), &mock.GasHandlerMock{}, + block.TxBlock, ) shardId := uint32(1) @@ -567,6 +574,7 @@ func TestTransactionPreprocessor_ReceivedTransactionShouldEraseRequested(t *test feeHandlerMock(), miniBlocksCompacterMock(), &mock.GasHandlerMock{}, + block.TxBlock, ) //add 3 tx hashes on requested list @@ -643,6 +651,7 @@ func TestTransactionPreprocessor_GetAllTxsFromMiniBlockShouldWork(t *testing.T) feeHandlerMock(), miniBlocksCompacterMock(), &mock.GasHandlerMock{}, + block.TxBlock, ) mb := &block.MiniBlock{ @@ -680,6 +689,7 @@ func TestTransactionPreprocessor_RemoveBlockTxsFromPoolNilBlockShouldErr(t *test feeHandlerMock(), miniBlocksCompacterMock(), &mock.GasHandlerMock{}, + block.TxBlock, ) err := txs.RemoveTxBlockFromPools(nil, tdp.MiniBlocks()) assert.NotNil(t, err) @@ -702,6 +712,7 @@ func TestTransactionPreprocessor_RemoveBlockTxsFromPoolOK(t *testing.T) { feeHandlerMock(), miniBlocksCompacterMock(), &mock.GasHandlerMock{}, + block.TxBlock, ) body := make(block.Body, 0) txHash := []byte("txHash") @@ -720,7 +731,7 @@ func TestTransactionPreprocessor_RemoveBlockTxsFromPoolOK(t *testing.T) { func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddAll(t *testing.T) { t.Parallel() - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + txPool, _ := txpool.NewShardedTxPool(storageUnit.CacheConfig{Size: 100000, Shards: 1}) requestTransaction := func(shardID uint32, txHashes [][]byte) {} hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} @@ -754,6 +765,7 @@ func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddAll(t *testi return 0 }, }, + block.TxBlock, ) assert.NotNil(t, txs) @@ -771,7 +783,7 @@ func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddAll(t *testi addedTxs = append(addedTxs, newTx) } - mb, err := txs.CreateAndProcessMiniBlock(sndShardId, dstShardId, process.MaxItemsInBlock, haveTimeTrue) + mb, err := txs.createAndProcessMiniBlock(sndShardId, dstShardId, process.MaxItemsInBlock, haveTimeTrue) assert.Nil(t, err) assert.Equal(t, len(addedTxs), len(mb.TxHashes)) @@ -780,7 +792,7 @@ func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddAll(t *testi func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddAllAsNoSCCalls(t *testing.T) { t.Parallel() - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: 
storageUnit.LRUCache}) + txPool, _ := txpool.NewShardedTxPool(storageUnit.CacheConfig{Size: 100000, Shards: 1}) requestTransaction := func(shardID uint32, txHashes [][]byte) {} hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} @@ -814,6 +826,7 @@ func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddAllAsNoSCCal return 0 }, }, + block.TxBlock, ) assert.NotNil(t, txs) @@ -833,7 +846,7 @@ func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddAllAsNoSCCal addedTxs = append(addedTxs, newTx) } - mb, err := txs.CreateAndProcessMiniBlock(sndShardId, dstShardId, process.MaxItemsInBlock, haveTimeTrue) + mb, err := txs.createAndProcessMiniBlock(sndShardId, dstShardId, process.MaxItemsInBlock, haveTimeTrue) assert.Nil(t, err) assert.Equal(t, len(addedTxs), len(mb.TxHashes)) @@ -842,7 +855,7 @@ func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddAllAsNoSCCal func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddOnly5asSCCall(t *testing.T) { t.Parallel() - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + txPool, _ := txpool.NewShardedTxPool(storageUnit.CacheConfig{Size: 100000, Shards: 1}) requestTransaction := func(shardID uint32, txHashes [][]byte) {} hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} @@ -884,6 +897,7 @@ func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddOnly5asSCCal RemoveGasRefundedCalled: func(hashes [][]byte) { }, }, + block.TxBlock, ) assert.NotNil(t, txs) @@ -902,166 +916,17 @@ func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddOnly5asSCCal addedTxs = append(addedTxs, newTx) } - mb, err := txs.CreateAndProcessMiniBlock(sndShardId, dstShardId, process.MaxItemsInBlock, haveTimeTrue) + mb, err := txs.createAndProcessMiniBlock(sndShardId, dstShardId, process.MaxItemsInBlock, haveTimeTrue) assert.Nil(t, err) assert.Equal(t, numTxsToAdd, len(mb.TxHashes)) } -//------- SortTxByNonce - -var r *rand.Rand -var mutex sync.Mutex - -func init() { - r = rand.New(rand.NewSource(time.Now().UnixNano())) -} - -func TestSortTxByNonce_NilTxDataPoolShouldErr(t *testing.T) { - t.Parallel() - transactions, txHashes, err := SortTxByNonce(nil) - assert.Nil(t, transactions) - assert.Nil(t, txHashes) - assert.Equal(t, process.ErrNilTxDataPool, err) -} - -func TestSortTxByNonce_EmptyCacherShouldReturnEmpty(t *testing.T) { - t.Parallel() - cacher, _ := storageUnit.NewCache(storageUnit.LRUCache, 100, 1) - transactions, txHashes, err := SortTxByNonce(cacher) - assert.Equal(t, 0, len(transactions)) - assert.Equal(t, 0, len(txHashes)) - assert.Nil(t, err) -} - -func TestSortTxByNonce_OneTxShouldWork(t *testing.T) { - t.Parallel() - cacher, _ := storageUnit.NewCache(storageUnit.LRUCache, 100, 1) - hash, tx := createRandTx(r) - cacher.HasOrAdd(hash, tx) - transactions, txHashes, err := SortTxByNonce(cacher) - assert.Equal(t, 1, len(transactions)) - assert.Equal(t, 1, len(txHashes)) - assert.Nil(t, err) - assert.True(t, hashInSlice(hash, txHashes)) - assert.True(t, txInSlice(tx, transactions)) -} - -func createRandTx(rand *rand.Rand) ([]byte, *transaction.Transaction) { - mutex.Lock() - nonce := rand.Uint64() - mutex.Unlock() - tx := &transaction.Transaction{ - Nonce: nonce, - } - marshalizer := &mock.MarshalizerMock{} - buffTx, _ := marshalizer.Marshal(tx) - hash := mock.HasherMock{}.Compute(string(buffTx)) - return hash, tx -} - -func hashInSlice(hash []byte, hashes [][]byte) bool { - for _, h := range hashes { - if 
bytes.Equal(h, hash) { - return true - } - } - return false -} - -func txInSlice(tx *transaction.Transaction, transactions []*transaction.Transaction) bool { - for _, t := range transactions { - if reflect.DeepEqual(tx, t) { - return true - } - } - return false -} - -func TestSortTxByNonce_MoreTransactionsShouldNotErr(t *testing.T) { - t.Parallel() - cache, _, _ := genCacherTransactionsHashes(100) - _, _, err := SortTxByNonce(cache) - assert.Nil(t, err) -} - -func TestSortTxByNonce_MoreTransactionsShouldRetSameSize(t *testing.T) { - t.Parallel() - cache, genTransactions, _ := genCacherTransactionsHashes(100) - transactions, txHashes, _ := SortTxByNonce(cache) - assert.Equal(t, len(genTransactions), len(transactions)) - assert.Equal(t, len(genTransactions), len(txHashes)) -} - -func TestSortTxByNonce_MoreTransactionsShouldContainSameElements(t *testing.T) { - t.Parallel() - cache, genTransactions, genHashes := genCacherTransactionsHashes(100) - transactions, txHashes, _ := SortTxByNonce(cache) - for i := 0; i < len(genTransactions); i++ { - assert.True(t, hashInSlice(genHashes[i], txHashes)) - assert.True(t, txInSlice(genTransactions[i], transactions)) - } -} - -func TestSortTxByNonce_MoreTransactionsShouldContainSortedElements(t *testing.T) { - t.Parallel() - cache, _, _ := genCacherTransactionsHashes(100) - transactions, _, _ := SortTxByNonce(cache) - lastNonce := uint64(0) - for i := 0; i < len(transactions); i++ { - tx := transactions[i] - assert.True(t, lastNonce <= tx.Nonce) - fmt.Println(tx.Nonce) - lastNonce = tx.Nonce - } -} - -func TestSortTxByNonce_TransactionsWithSameNonceShouldGetSorted(t *testing.T) { - t.Parallel() - transactions := []*transaction.Transaction{ - {Nonce: 1, Signature: []byte("sig1")}, - {Nonce: 2, Signature: []byte("sig2")}, - {Nonce: 1, Signature: []byte("sig3")}, - {Nonce: 2, Signature: []byte("sig4")}, - {Nonce: 3, Signature: []byte("sig5")}, - } - cache, _ := storageUnit.NewCache(storageUnit.LRUCache, uint32(len(transactions)), 1) - for _, tx := range transactions { - marshalizer := &mock.MarshalizerMock{} - buffTx, _ := marshalizer.Marshal(tx) - hash := mock.HasherMock{}.Compute(string(buffTx)) - - cache.Put(hash, tx) - } - sortedTxs, _, _ := SortTxByNonce(cache) - lastNonce := uint64(0) - for i := 0; i < len(sortedTxs); i++ { - tx := sortedTxs[i] - assert.True(t, lastNonce <= tx.Nonce) - fmt.Printf("tx.Nonce: %d, tx.Sig: %s\n", tx.Nonce, tx.Signature) - lastNonce = tx.Nonce - } - assert.Equal(t, len(sortedTxs), len(transactions)) - //test if one transaction from transactions might not be in sortedTx - for _, tx := range transactions { - found := false - for _, stx := range sortedTxs { - if reflect.DeepEqual(tx, stx) { - found = true - break - } - } - if !found { - assert.Fail(t, "Not found tx in sorted slice for sig: "+string(tx.Signature)) - } - } -} - func TestMiniBlocksCompaction_CompactAndExpandMiniBlocksShouldResultTheSameMiniBlocks(t *testing.T) { t.Parallel() totalGasConsumed := uint64(0) - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + txPool, _ := txpool.NewShardedTxPool(storageUnit.CacheConfig{Size: 100000, Shards: 1}) requestTransaction := func(shardID uint32, txHashes [][]byte) {} txs, _ := NewTransactionPreprocessor( txPool, @@ -1096,6 +961,7 @@ func TestMiniBlocksCompaction_CompactAndExpandMiniBlocksShouldResultTheSameMiniB return 0 }, }, + block.TxBlock, ) keygen := signing.NewKeyGenerator(kyber.NewBlakeSHA256Ed25519()) @@ -1191,25 +1057,3 @@ func 
TestMiniBlocksCompaction_CompactAndExpandMiniBlocksShouldResultTheSameMiniB assert.True(t, reflect.DeepEqual(mbsValues[i], *expandedMbs[i])) } } - -func genCacherTransactionsHashes(noOfTx int) (storage.Cacher, []*transaction.Transaction, [][]byte) { - cacher, _ := storageUnit.NewCache(storageUnit.LRUCache, uint32(noOfTx), 1) - genHashes := make([][]byte, 0) - genTransactions := make([]*transaction.Transaction, 0) - for i := 0; i < noOfTx; i++ { - hash, tx := createRandTx(r) - cacher.HasOrAdd(hash, tx) - - genHashes = append(genHashes, hash) - genTransactions = append(genTransactions, tx) - } - return cacher, genTransactions, genHashes -} - -func BenchmarkSortTxByNonce1(b *testing.B) { - cache, _, _ := genCacherTransactionsHashes(10000) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _, _ = SortTxByNonce(cache) - } -} diff --git a/process/block/processedMb/processedMiniBlocks.go b/process/block/processedMb/processedMiniBlocks.go new file mode 100644 index 00000000000..27c17381712 --- /dev/null +++ b/process/block/processedMb/processedMiniBlocks.go @@ -0,0 +1,137 @@ +package processedMb + +import ( + "sync" + + "github.com/ElrondNetwork/elrond-go/logger" + "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" +) + +var log = logger.GetOrCreate("process/processedMb") + +// MiniBlockHashes will keep a list of miniblock hashes as keys in a map for easy access +type MiniBlockHashes map[string]struct{} + +// ProcessedMiniBlockTracker is used to store all processed mini blocks hashes grouped by a metahash +type ProcessedMiniBlockTracker struct { + processedMiniBlocks map[string]MiniBlockHashes + mutProcessedMiniBlocks sync.RWMutex +} + +// NewProcessedMiniBlocks will create a complex type of processedMb +func NewProcessedMiniBlocks() *ProcessedMiniBlockTracker { + return &ProcessedMiniBlockTracker{ + processedMiniBlocks: make(map[string]MiniBlockHashes), + } +} + +// AddMiniBlockHash will add a miniblock hash +func (pmb *ProcessedMiniBlockTracker) AddMiniBlockHash(metaBlockHash string, miniBlockHash string) { + pmb.mutProcessedMiniBlocks.Lock() + defer pmb.mutProcessedMiniBlocks.Unlock() + + miniBlocksProcessed, ok := pmb.processedMiniBlocks[metaBlockHash] + if !ok { + miniBlocksProcessed = make(MiniBlockHashes) + miniBlocksProcessed[miniBlockHash] = struct{}{} + pmb.processedMiniBlocks[metaBlockHash] = miniBlocksProcessed + + return + } + + miniBlocksProcessed[miniBlockHash] = struct{}{} +} + +// RemoveMetaBlockHash will remove a meta block hash +func (pmb *ProcessedMiniBlockTracker) RemoveMetaBlockHash(metaBlockHash string) { + pmb.mutProcessedMiniBlocks.Lock() + delete(pmb.processedMiniBlocks, metaBlockHash) + pmb.mutProcessedMiniBlocks.Unlock() +} + +// RemoveMiniBlockHash will remove a mini block hash +func (pmb *ProcessedMiniBlockTracker) RemoveMiniBlockHash(miniBlockHash string) { + pmb.mutProcessedMiniBlocks.Lock() + for metaHash, miniBlocksProcessed := range pmb.processedMiniBlocks { + delete(miniBlocksProcessed, miniBlockHash) + + if len(miniBlocksProcessed) == 0 { + delete(pmb.processedMiniBlocks, metaHash) + } + } + pmb.mutProcessedMiniBlocks.Unlock() +} + +// GetProcessedMiniBlocksHashes will return all processed miniblocks for a metablock +func (pmb *ProcessedMiniBlockTracker) GetProcessedMiniBlocksHashes(metaBlockHash string) map[string]struct{} { + pmb.mutProcessedMiniBlocks.RLock() + processedMiniBlocksHashes := pmb.processedMiniBlocks[metaBlockHash] + pmb.mutProcessedMiniBlocks.RUnlock() + + return processedMiniBlocksHashes +} + +// IsMiniBlockProcessed will 
return true if a mini block is processed +func (pmb *ProcessedMiniBlockTracker) IsMiniBlockProcessed(metaBlockHash string, miniBlockHash string) bool { + pmb.mutProcessedMiniBlocks.RLock() + defer pmb.mutProcessedMiniBlocks.RUnlock() + + miniBlocksProcessed, ok := pmb.processedMiniBlocks[metaBlockHash] + if !ok { + return false + } + + _, isProcessed := miniBlocksProcessed[miniBlockHash] + return isProcessed +} + +// ConvertProcessedMiniBlocksMapToSlice will convert a map[string]map[string]struct{} in a slice of MiniBlocksInMeta +func (pmb *ProcessedMiniBlockTracker) ConvertProcessedMiniBlocksMapToSlice() []bootstrapStorage.MiniBlocksInMeta { + pmb.mutProcessedMiniBlocks.RLock() + defer pmb.mutProcessedMiniBlocks.RUnlock() + + miniBlocksInMetaBlocks := make([]bootstrapStorage.MiniBlocksInMeta, 0, len(pmb.processedMiniBlocks)) + for metaHash, miniBlocksHashes := range pmb.processedMiniBlocks { + miniBlocksInMeta := bootstrapStorage.MiniBlocksInMeta{ + MetaHash: []byte(metaHash), + MiniBlocksHashes: make([][]byte, 0, len(miniBlocksHashes)), + } + for miniBlockHash := range miniBlocksHashes { + miniBlocksInMeta.MiniBlocksHashes = append(miniBlocksInMeta.MiniBlocksHashes, []byte(miniBlockHash)) + } + miniBlocksInMetaBlocks = append(miniBlocksInMetaBlocks, miniBlocksInMeta) + } + + return miniBlocksInMetaBlocks +} + +// ConvertSliceToProcessedMiniBlocksMap will convert a slice of MiniBlocksInMeta in an map[string]MiniBlockHashes +func (pmb *ProcessedMiniBlockTracker) ConvertSliceToProcessedMiniBlocksMap(miniBlocksInMetaBlocks []bootstrapStorage.MiniBlocksInMeta) { + pmb.mutProcessedMiniBlocks.Lock() + defer pmb.mutProcessedMiniBlocks.Unlock() + + for _, miniBlocksInMeta := range miniBlocksInMetaBlocks { + miniBlocksHashes := make(MiniBlockHashes) + for _, miniBlockHash := range miniBlocksInMeta.MiniBlocksHashes { + miniBlocksHashes[string(miniBlockHash)] = struct{}{} + } + pmb.processedMiniBlocks[string(miniBlocksInMeta.MetaHash)] = miniBlocksHashes + } + +} + +// DisplayProcessedMiniBlocks will display all miniblocks hashes and meta block hash from the map +func (pmb *ProcessedMiniBlockTracker) DisplayProcessedMiniBlocks() { + log.Debug("processed mini blocks applied") + + pmb.mutProcessedMiniBlocks.RLock() + for metaBlockHash, miniBlocksHashes := range pmb.processedMiniBlocks { + log.Debug("processed", + "meta hash", []byte(metaBlockHash)) + for miniBlockHash := range miniBlocksHashes { + log.Debug("processed", + "mini block hash", []byte(miniBlockHash)) + } + } + pmb.mutProcessedMiniBlocks.RUnlock() +} diff --git a/process/block/processedMb/processedMiniBlocks_test.go b/process/block/processedMb/processedMiniBlocks_test.go new file mode 100644 index 00000000000..7e5a4adde61 --- /dev/null +++ b/process/block/processedMb/processedMiniBlocks_test.go @@ -0,0 +1,80 @@ +package processedMb_test + +import ( + "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestProcessedMiniBlocks_AddMiniBlockHashShouldWork(t *testing.T) { + t.Parallel() + + pmb := processedMb.NewProcessedMiniBlocks() + + mbHash1 := "hash1" + mbHash2 := "hash2" + mtbHash1 := "meta1" + mtbHash2 := "meta2" + + pmb.AddMiniBlockHash(mtbHash1, mbHash1) + assert.True(t, pmb.IsMiniBlockProcessed(mtbHash1, mbHash1)) + + pmb.AddMiniBlockHash(mtbHash2, mbHash1) + assert.True(t, pmb.IsMiniBlockProcessed(mtbHash2, mbHash1)) + + pmb.AddMiniBlockHash(mtbHash1, mbHash2) + assert.True(t, 
pmb.IsMiniBlockProcessed(mtbHash1, mbHash2)) + + pmb.RemoveMiniBlockHash(mbHash1) + assert.False(t, pmb.IsMiniBlockProcessed(mtbHash1, mbHash1)) + + pmb.RemoveMiniBlockHash(mbHash1) + assert.False(t, pmb.IsMiniBlockProcessed(mtbHash1, mbHash1)) + + pmb.RemoveMetaBlockHash(mtbHash2) + assert.False(t, pmb.IsMiniBlockProcessed(mtbHash2, mbHash1)) +} + +func TestProcessedMiniBlocks_GetProcessedMiniBlocksHashes(t *testing.T) { + t.Parallel() + + pmb := processedMb.NewProcessedMiniBlocks() + + mbHash1 := "hash1" + mbHash2 := "hash2" + mtbHash1 := "meta1" + mtbHash2 := "meta2" + + pmb.AddMiniBlockHash(mtbHash1, mbHash1) + pmb.AddMiniBlockHash(mtbHash1, mbHash2) + pmb.AddMiniBlockHash(mtbHash2, mbHash2) + + mapData := pmb.GetProcessedMiniBlocksHashes(mtbHash1) + assert.NotNil(t, mapData[mbHash1]) + assert.NotNil(t, mapData[mbHash2]) + + mapData = pmb.GetProcessedMiniBlocksHashes(mtbHash2) + assert.NotNil(t, mapData[mbHash1]) +} + +func TestProcessedMiniBlocks_ConvertSliceToProcessedMiniBlocksMap(t *testing.T) { + t.Parallel() + + pmb := processedMb.NewProcessedMiniBlocks() + + mbHash1 := "hash1" + mtbHash1 := "meta1" + + data1 := bootstrapStorage.MiniBlocksInMeta{ + MetaHash: []byte(mtbHash1), + MiniBlocksHashes: [][]byte{[]byte(mbHash1)}, + } + + miniBlocksInMeta := []bootstrapStorage.MiniBlocksInMeta{data1} + pmb.ConvertSliceToProcessedMiniBlocksMap(miniBlocksInMeta) + assert.True(t, pmb.IsMiniBlockProcessed(mtbHash1, mbHash1)) + + convertedData := pmb.ConvertProcessedMiniBlocksMapToSlice() + assert.Equal(t, miniBlocksInMeta, convertedData) +} diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 4a08f3a8dd0..7f5fec65472 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -3,8 +3,6 @@ package block import ( "bytes" "fmt" - "sort" - "sync" "time" "github.com/ElrondNetwork/elrond-go/core" @@ -14,10 +12,10 @@ import ( "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" "github.com/ElrondNetwork/elrond-go/display" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" "github.com/ElrondNetwork/elrond-go/process/throttle" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/statusHandler" @@ -28,14 +26,19 @@ const maxCleanTime = time.Second // shardProcessor implements shardProcessor interface and actually it tries to execute block type shardProcessor struct { *baseProcessor - dataPool dataRetriever.PoolsHolder - metaBlockFinality uint32 - chRcvAllMetaHdrs chan bool - processedMiniBlocks map[string]map[string]struct{} - mutProcessedMiniBlocks sync.RWMutex - core serviceContainer.Core - txCounter *transactionCounter - txsPoolsCleaner process.PoolsCleaner + dataPool dataRetriever.PoolsHolder + metaBlockFinality uint32 + chRcvAllMetaHdrs chan bool + + chRcvEpochStart chan bool + + processedMiniBlocks *processedMb.ProcessedMiniBlockTracker + core serviceContainer.Core + txCounter *transactionCounter + txsPoolsCleaner process.PoolsCleaner + + stateCheckpointModulus uint + lowestNonceInSelfNotarizedHeaders uint64 } // NewShardProcessor creates a new shardProcessor object @@ -45,9 +48,12 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { return nil, err } - if arguments.DataPool == nil || arguments.DataPool.IsInterfaceNil() { + if 
check.IfNil(arguments.DataPool) { return nil, process.ErrNilDataPoolHolder } + if check.IfNil(arguments.DataPool.Headers()) { + return nil, process.ErrNilHeadersDataPool + } blockSizeThrottler, err := throttle.NewBlockSizeThrottle() if err != nil { @@ -55,28 +61,26 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { } base := &baseProcessor{ - accounts: arguments.Accounts, - blockSizeThrottler: blockSizeThrottler, - forkDetector: arguments.ForkDetector, - hasher: arguments.Hasher, - marshalizer: arguments.Marshalizer, - store: arguments.Store, - shardCoordinator: arguments.ShardCoordinator, - nodesCoordinator: arguments.NodesCoordinator, - specialAddressHandler: arguments.SpecialAddressHandler, - uint64Converter: arguments.Uint64Converter, - onRequestHeaderHandlerByNonce: arguments.RequestHandler.RequestHeaderByNonce, - appStatusHandler: statusHandler.NewNilStatusHandler(), - blockChainHook: arguments.BlockChainHook, - txCoordinator: arguments.TxCoordinator, - rounder: arguments.Rounder, - bootStorer: arguments.BootStorer, - validatorStatisticsProcessor: arguments.ValidatorStatisticsProcessor, - } - - err = base.setLastNotarizedHeadersSlice(arguments.StartHeaders) - if err != nil { - return nil, err + accounts: arguments.Accounts, + blockSizeThrottler: blockSizeThrottler, + forkDetector: arguments.ForkDetector, + hasher: arguments.Hasher, + marshalizer: arguments.Marshalizer, + store: arguments.Store, + shardCoordinator: arguments.ShardCoordinator, + nodesCoordinator: arguments.NodesCoordinator, + specialAddressHandler: arguments.SpecialAddressHandler, + uint64Converter: arguments.Uint64Converter, + requestHandler: arguments.RequestHandler, + appStatusHandler: statusHandler.NewNilStatusHandler(), + blockChainHook: arguments.BlockChainHook, + txCoordinator: arguments.TxCoordinator, + rounder: arguments.Rounder, + epochStartTrigger: arguments.EpochStartTrigger, + headerValidator: arguments.HeaderValidator, + bootStorer: arguments.BootStorer, + validatorStatisticsProcessor: arguments.ValidatorStatisticsProcessor, + blockTracker: arguments.BlockTracker, } if arguments.TxsPoolsCleaner == nil || arguments.TxsPoolsCleaner.IsInterfaceNil() { @@ -84,11 +88,12 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { } sp := shardProcessor{ - core: arguments.Core, - baseProcessor: base, - dataPool: arguments.DataPool, - txCounter: NewTransactionCounter(), - txsPoolsCleaner: arguments.TxsPoolsCleaner, + core: arguments.Core, + baseProcessor: base, + dataPool: arguments.DataPool, + txCounter: NewTransactionCounter(), + txsPoolsCleaner: arguments.TxsPoolsCleaner, + stateCheckpointModulus: arguments.StateCheckpointModulus, } sp.baseProcessor.requestBlockBodyHandler = &sp @@ -102,18 +107,15 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { sp.hdrsForCurrBlock.hdrHashAndInfo = make(map[string]*hdrInfo) sp.hdrsForCurrBlock.highestHdrNonce = make(map[uint32]uint64) - sp.processedMiniBlocks = make(map[string]map[string]struct{}) + sp.processedMiniBlocks = processedMb.NewProcessedMiniBlocks() - metaBlockPool := sp.dataPool.MetaBlocks() + metaBlockPool := sp.dataPool.Headers() if metaBlockPool == nil { return nil, process.ErrNilMetaBlocksPool } metaBlockPool.RegisterHandler(sp.receivedMetaBlock) - sp.onRequestHeaderHandler = arguments.RequestHandler.RequestHeader - - sp.metaBlockFinality = process.MetaBlockFinality - sp.lastHdrs = make(mapShardHeader) + sp.metaBlockFinality = process.BlockFinality return &sp, nil } @@ -138,7 
+140,7 @@ func (sp *shardProcessor) ProcessBlock( "for shard", headerHandler.GetShardID(), ) - go sp.onRequestHeaderHandler(headerHandler.GetShardID(), headerHandler.GetPrevHash()) + go sp.requestHandler.RequestShardHeader(headerHandler.GetShardID(), headerHandler.GetPrevHash()) } return err @@ -226,6 +228,11 @@ func (sp *shardProcessor) ProcessBlock( } } + err = sp.requestEpochStartInfo(header, haveTime()) + if err != nil { + return err + } + if sp.accounts.JournalLen() != 0 { return process.ErrAccountStateDirty } @@ -234,6 +241,11 @@ func (sp *shardProcessor) ProcessBlock( go sp.checkAndRequestIfMetaHeadersMissing(header.Round) }() + err = sp.checkEpochCorrectness(header, chainHandler) + if err != nil { + return err + } + err = sp.checkMetaHeadersValidityAndFinality() if err != nil { return err @@ -270,7 +282,7 @@ func (sp *shardProcessor) ProcessBlock( return err } - err = sp.txCoordinator.VerifyCreatedBlockTransactions(body) + err = sp.txCoordinator.VerifyCreatedBlockTransactions(header, body) if err != nil { return err } @@ -293,6 +305,60 @@ func (sp *shardProcessor) ProcessBlock( return nil } +func (sp *shardProcessor) requestEpochStartInfo(header *block.Header, waitTime time.Duration) error { + _ = process.EmptyChannel(sp.chRcvEpochStart) + haveMissingMetaHeaders := header.IsStartOfEpochBlock() && !sp.epochStartTrigger.IsEpochStart() + + if haveMissingMetaHeaders { + select { + case <-sp.chRcvEpochStart: + return nil + case <-time.After(waitTime): + return process.ErrTimeIsOut + } + } + + return nil +} + +func (sp *shardProcessor) checkEpochCorrectness( + header *block.Header, + chainHandler data.ChainHandler, +) error { + currentBlockHeader := chainHandler.GetCurrentBlockHeader() + if currentBlockHeader == nil { + return nil + } + + isEpochIncorrect := header.GetEpoch() < currentBlockHeader.GetEpoch() + if isEpochIncorrect { + return process.ErrEpochDoesNotMatch + } + + isEpochIncorrect = header.GetEpoch() != currentBlockHeader.GetEpoch() && + sp.epochStartTrigger.Epoch() == currentBlockHeader.GetEpoch() + if isEpochIncorrect { + return process.ErrEpochDoesNotMatch + } + + isOldEpochAndShouldBeNew := sp.epochStartTrigger.IsEpochStart() && + header.GetRound() > sp.epochStartTrigger.EpochFinalityAttestingRound()+process.EpochChangeGracePeriod && + header.GetEpoch() != sp.epochStartTrigger.Epoch() + if isOldEpochAndShouldBeNew { + return process.ErrEpochDoesNotMatch + } + + isEpochStartMetaHashIncorrect := header.IsStartOfEpochBlock() && + !bytes.Equal(header.EpochStartMetaHash, sp.epochStartTrigger.EpochStartMetaHdrHash()) + if isEpochStartMetaHashIncorrect { + go sp.requestHandler.RequestMetaHeader(header.EpochStartMetaHash) + sp.epochStartTrigger.Revert() + return process.ErrEpochDoesNotMatch + } + + return nil +} + // SetNumProcessedObj will set the num of processed transactions func (sp *shardProcessor) SetNumProcessedObj(numObj uint64) { sp.txCounter.totalTxs = numObj @@ -314,17 +380,9 @@ func (sp *shardProcessor) setMetaConsensusData(finalizedMetaBlocks []data.Header return nil } -// SetConsensusData - sets the reward data for the current consensus group -func (sp *shardProcessor) SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) { - err := sp.specialAddressHandler.SetShardConsensusData(randomness, round, epoch, shardId) - if err != nil { - log.Debug("SetShardConsensusData", "error", err.Error()) - } -} - // checkMetaHeadersValidity - checks if listed metaheaders are valid as construction func (sp *shardProcessor) 
checkMetaHeadersValidityAndFinality() error { - tmpNotedHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + lastCrossNotarizedHeader, _, err := sp.blockTracker.GetLastCrossNotarizedHeader(sharding.MetachainShardId) if err != nil { return err } @@ -335,15 +393,15 @@ func (sp *shardProcessor) checkMetaHeadersValidityAndFinality() error { } for _, metaHdr := range usedMetaHdrs[sharding.MetachainShardId] { - err = sp.isHdrConstructionValid(metaHdr, tmpNotedHdr) + err = sp.headerValidator.IsHeaderConstructionValid(metaHdr, lastCrossNotarizedHeader) if err != nil { - return err + return fmt.Errorf("%w : checkMetaHeadersValidityAndFinality -> isHdrConstructionValid", err) } - tmpNotedHdr = metaHdr + lastCrossNotarizedHeader = metaHdr } - err = sp.checkMetaHdrFinality(tmpNotedHdr) + err = sp.checkMetaHdrFinality(lastCrossNotarizedHeader) if err != nil { return err } @@ -369,10 +427,10 @@ func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler) error // found a header with the next nonce if metaHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { - err := sp.isHdrConstructionValid(metaHdr, lastVerifiedHdr) + err := sp.headerValidator.IsHeaderConstructionValid(metaHdr, lastVerifiedHdr) if err != nil { - go sp.removeHeaderFromPools(metaHdr, sp.dataPool.MetaBlocks(), sp.dataPool.HeadersNonces()) - log.Trace("isHdrConstructionValid", "error", err.Error()) + log.Debug("checkMetaHdrFinality -> isHdrConstructionValid", + "error", err.Error()) continue } @@ -382,8 +440,8 @@ func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler) error } if nextBlocksVerified < sp.metaBlockFinality { - go sp.onRequestHeaderHandlerByNonce(lastVerifiedHdr.GetShardID(), lastVerifiedHdr.GetNonce()) - go sp.onRequestHeaderHandlerByNonce(lastVerifiedHdr.GetShardID(), lastVerifiedHdr.GetNonce()+1) + go sp.requestHandler.RequestMetaHeaderByNonce(lastVerifiedHdr.GetNonce()) + go sp.requestHandler.RequestMetaHeaderByNonce(lastVerifiedHdr.GetNonce() + 1) return process.ErrHeaderNotFinal } @@ -391,39 +449,28 @@ func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler) error } func (sp *shardProcessor) checkAndRequestIfMetaHeadersMissing(round uint64) { - orderedMetaBlocks, err := sp.getOrderedMetaBlocks(round) - if err != nil { - log.Trace("getOrderedMetaBlocks", "error", err.Error()) - return - } + orderedMetaBlocks, _ := sp.blockTracker.GetTrackedHeaders(sharding.MetachainShardId) - sortedHdrs := make([]data.HeaderHandler, 0, len(orderedMetaBlocks)) - for i := 0; i < len(orderedMetaBlocks); i++ { - hdr, ok := orderedMetaBlocks[i].hdr.(*block.MetaBlock) - if !ok { - continue - } - sortedHdrs = append(sortedHdrs, hdr) - } - - err = sp.requestHeadersIfMissing(sortedHdrs, sharding.MetachainShardId, round, sp.dataPool.MetaBlocks()) + err := sp.requestHeadersIfMissing(orderedMetaBlocks, sharding.MetachainShardId, round, sp.dataPool.Headers().MaxSize()) if err != nil { - log.Debug("requestHeadersIfMissing", "error", err.Error()) + log.Debug("checkAndRequestIfMetaHeadersMissing", "error", err.Error()) } - lastNotarizedHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + lastCrossNotarizedHeader, _, err := sp.blockTracker.GetLastCrossNotarizedHeader(sharding.MetachainShardId) if err != nil { - log.Debug("getLastNotarizedHdr", "error", err.Error()) + log.Debug("checkAndRequestIfMetaHeadersMissing", + "shard", sharding.MetachainShardId, + "error", err.Error()) return } - for i := 0; i < len(sortedHdrs); i++ { - isMetaBlockOutOfRange := sortedHdrs[i].GetNonce() > 
lastNotarizedHdr.GetNonce()+process.MaxHeadersToRequestInAdvance + for i := 0; i < len(orderedMetaBlocks); i++ { + isMetaBlockOutOfRange := orderedMetaBlocks[i].GetNonce() > lastCrossNotarizedHeader.GetNonce()+process.MaxHeadersToRequestInAdvance if isMetaBlockOutOfRange { break } - sp.txCoordinator.RequestMiniBlocks(sortedHdrs[i]) + sp.txCoordinator.RequestMiniBlocks(orderedMetaBlocks[i]) } return @@ -441,6 +488,8 @@ func (sp *shardProcessor) indexBlockIfNeeded( txPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.TxBlock) scPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.SmartContractResultBlock) rewardPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.RewardsBlock) + invalidPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.InvalidBlock) + receiptPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.ReceiptBlock) for hash, tx := range scPool { txPool[hash] = tx @@ -448,6 +497,12 @@ func (sp *shardProcessor) indexBlockIfNeeded( for hash, tx := range rewardPool { txPool[hash] = tx } + for hash, tx := range invalidPool { + txPool[hash] = tx + } + for hash, tx := range receiptPool { + txPool[hash] = tx + } shardId := sp.shardCoordinator.SelfId() pubKeys, err := sp.nodesCoordinator.GetValidatorsPublicKeys(header.GetPrevRandSeed(), header.GetRound(), shardId) @@ -491,24 +546,23 @@ func (sp *shardProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler log.Debug("RestoreBlockDataFromStorage", "error", errNotCritical.Error()) } + if header.IsStartOfEpochBlock() { + sp.epochStartTrigger.Revert() + } + go sp.txCounter.subtractRestoredTxs(restoredTxNr) - sp.removeLastNotarized() + sp.blockTracker.RemoveLastNotarizedHeaders() return nil } func (sp *shardProcessor) restoreMetaBlockIntoPool(mapMiniBlockHashes map[string]uint32, metaBlockHashes [][]byte) error { - metaBlockPool := sp.dataPool.MetaBlocks() + metaBlockPool := sp.dataPool.Headers() if metaBlockPool == nil { return process.ErrNilMetaBlocksPool } - metaHeaderNoncesPool := sp.dataPool.HeadersNonces() - if metaHeaderNoncesPool == nil { - return process.ErrNilMetaHeadersNoncesDataPool - } - mapMetaHashMiniBlockHashes := make(map[string][][]byte, len(metaBlockHashes)) for _, metaBlockHash := range metaBlockHashes { @@ -524,10 +578,7 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool(mapMiniBlockHashes map[string mapMetaHashMiniBlockHashes[string(metaBlockHash)] = append(mapMetaHashMiniBlockHashes[string(metaBlockHash)], []byte(mbHash)) } - metaBlockPool.Put(metaBlockHash, metaBlock) - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(metaBlock.GetShardID(), metaBlockHash) - metaHeaderNoncesPool.Merge(metaBlock.GetNonce(), syncMap) + metaBlockPool.AddHeader(metaBlockHash, metaBlock) err := sp.store.GetStorer(dataRetriever.MetaBlockUnit).Remove(metaBlockHash) if err != nil { @@ -551,12 +602,12 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool(mapMiniBlockHashes map[string for metaBlockHash, miniBlockHashes := range mapMetaHashMiniBlockHashes { for _, miniBlockHash := range miniBlockHashes { - sp.addProcessedMiniBlock([]byte(metaBlockHash), miniBlockHash) + sp.processedMiniBlocks.AddMiniBlockHash(metaBlockHash, string(miniBlockHash)) } } for miniBlockHash := range mapMiniBlockHashes { - sp.removeProcessedMiniBlock([]byte(miniBlockHash)) + sp.processedMiniBlocks.RemoveMiniBlockHash(miniBlockHash) } return nil @@ -568,12 +619,30 @@ func (sp *shardProcessor) CreateBlockBody(initialHdrData data.HeaderHandler, hav log.Trace("started creating block body", "round", initialHdrData.GetRound(), ) + sp.createBlockStarted() 
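restoreMetaBlockIntoPool and the surrounding shard-processor code now go through the processedMb tracker introduced by this diff instead of the raw map guarded by mutProcessedMiniBlocks. A minimal usage sketch of that tracker, assuming only the constructor and methods declared in the new processedMiniBlocks.go file above (the package path is taken from that file; the example is illustrative and not part of the change):

package main

import (
	"fmt"

	"github.com/ElrondNetwork/elrond-go/process/block/processedMb"
)

func main() {
	tracker := processedMb.NewProcessedMiniBlocks()

	// Mark miniblock "mb1", referenced by metablock "meta1", as processed.
	tracker.AddMiniBlockHash("meta1", "mb1")
	fmt.Println(tracker.IsMiniBlockProcessed("meta1", "mb1")) // true

	// Rolling back a miniblock removes it from every metablock entry and
	// drops metablock entries that become empty.
	tracker.RemoveMiniBlockHash("mb1")
	fmt.Println(tracker.IsMiniBlockProcessed("meta1", "mb1")) // false

	// Removing a metablock hash clears all miniblocks recorded under it.
	tracker.AddMiniBlockHash("meta2", "mb2")
	tracker.RemoveMetaBlockHash("meta2")
	fmt.Println(tracker.IsMiniBlockProcessed("meta2", "mb2")) // false
}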
sp.blockSizeThrottler.ComputeMaxItems() + initialHdrData.SetEpoch(sp.epochStartTrigger.Epoch()) sp.blockChainHook.SetCurrentHeader(initialHdrData) - miniBlocks, err := sp.createMiniBlocks(sp.blockSizeThrottler.MaxItemsToAdd(), initialHdrData.GetRound(), haveTime) + err := sp.specialAddressHandler.SetShardConsensusData( + initialHdrData.GetPrevRandSeed(), + initialHdrData.GetRound(), + initialHdrData.GetEpoch(), + initialHdrData.GetShardID(), + ) + if err != nil { + return nil, err + } + + log.Trace("started creating block body", + "round", initialHdrData.GetRound(), + "nonce", initialHdrData.GetNonce(), + "epoch", initialHdrData.GetEpoch(), + ) + + miniBlocks, err := sp.createMiniBlocks(sp.blockSizeThrottler.MaxItemsToAdd(), haveTime) if err != nil { return nil, err } @@ -637,20 +706,13 @@ func (sp *shardProcessor) CommitBlock( log.Trace("BlockHeaderUnit store.Put", "error", errNotCritical.Error()) } - headersNoncesPool := sp.dataPool.HeadersNonces() - if headersNoncesPool == nil { - err = process.ErrNilHeadersNoncesDataPool - return err - } - headersPool := sp.dataPool.Headers() if headersPool == nil { err = process.ErrNilHeadersDataPool return err } - headersNoncesPool.Remove(header.GetNonce(), header.GetShardID()) - headersPool.Remove(headerHash) + headersPool.RemoveHeaderByHash(headerHash) body, ok := bodyHandler.(block.Body) if !ok { @@ -686,11 +748,13 @@ func (sp *shardProcessor) CommitBlock( return err } - finalHeaders, finalHeadersHashes, err := sp.getHighestHdrForOwnShardFromMetachain(processedMetaHdrs) + selfNotarizedHeaders, selfNotarizedHeadersHashes, err := sp.getHighestHdrForOwnShardFromMetachain(processedMetaHdrs) if err != nil { return err } + sp.cleanupBlockTrackerPools(headerHandler) + err = sp.saveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) if err != nil { return err @@ -701,9 +765,15 @@ func (sp *shardProcessor) CommitBlock( return err } + if header.IsStartOfEpochBlock() { + err = sp.checkEpochCorrectnessCrossChain(chainHandler) + sp.epochStartTrigger.SetProcessed(header) + } + log.Info("shard block has been committed successfully", - "nonce", header.Nonce, + "epoch", header.Epoch, "round", header.Round, + "nonce", header.Nonce, "hash", headerHash, ) @@ -717,22 +787,24 @@ func (sp *shardProcessor) CommitBlock( log.Debug("removeProcessedMetaBlocksFromPool", "error", errNotCritical.Error()) } - isMetachainStuck := sp.isShardStuck(sharding.MetachainShardId) - - errNotCritical = sp.forkDetector.AddHeader(header, headerHash, process.BHProcessed, finalHeaders, finalHeadersHashes, isMetachainStuck) + errNotCritical = sp.forkDetector.AddHeader(header, headerHash, process.BHProcessed, selfNotarizedHeaders, selfNotarizedHeadersHashes) if errNotCritical != nil { log.Debug("forkDetector.AddHeader", "error", errNotCritical.Error()) } + sp.blockTracker.AddSelfNotarizedHeader(sp.shardCoordinator.SelfId(), chainHandler.GetCurrentBlockHeader(), chainHandler.GetCurrentBlockHeaderHash()) + + selfNotarizedHeader, selfNotarizedHeaderHash := sp.getLastSelfNotarizedHeader() + sp.blockTracker.AddSelfNotarizedHeader(sharding.MetachainShardId, selfNotarizedHeader, selfNotarizedHeaderHash) + + sp.updateStateStorage(selfNotarizedHeaders) + highestFinalBlockNonce := sp.forkDetector.GetHighestFinalBlockNonce() log.Debug("highest final shard block", "nonce", highestFinalBlockNonce, "shard", sp.shardCoordinator.SelfId(), ) - hdrsToAttestPreviousFinal := uint32(header.Nonce-highestFinalBlockNonce) + 1 - sp.removeNotarizedHdrsBehindPreviousFinal(hdrsToAttestPreviousFinal) - 
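The epoch handling added to ProcessBlock and CommitBlock boils down to a few comparisons against the epoch-start trigger. A simplified, self-contained restatement of the rules from checkEpochCorrectness, with the EpochStartMetaHash comparison omitted; the standalone function and its parameter names are illustrative only:

package main

import (
	"errors"
	"fmt"
)

var errEpochDoesNotMatch = errors.New("epoch does not match")

// checkEpoch restates the conditions from checkEpochCorrectness: a header must
// not fall behind the chain's current epoch, must not advance the epoch while
// the trigger still reports the current one, and must switch to the trigger's
// epoch once the grace period after the epoch-start finality round has passed.
func checkEpoch(
	headerEpoch uint32,
	currentEpoch uint32,
	triggerEpoch uint32,
	headerRound uint64,
	finalityAttestingRound uint64,
	gracePeriod uint64,
	epochStartTriggered bool,
) error {
	if headerEpoch < currentEpoch {
		return errEpochDoesNotMatch
	}
	if headerEpoch != currentEpoch && triggerEpoch == currentEpoch {
		return errEpochDoesNotMatch
	}
	shouldAlreadyBeInNewEpoch := epochStartTriggered &&
		headerRound > finalityAttestingRound+gracePeriod &&
		headerEpoch != triggerEpoch
	if shouldAlreadyBeInNewEpoch {
		return errEpochDoesNotMatch
	}
	return nil
}

func main() {
	// A header still announcing epoch 1 although the trigger moved to epoch 2
	// and the grace period is over gets rejected.
	fmt.Println(checkEpoch(1, 1, 2, 120, 100, 5, true)) // epoch does not match
}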
lastBlockHeader := chainHandler.GetCurrentBlockHeader() err = chainHandler.SetCurrentBlockBody(body) @@ -748,16 +820,17 @@ func (sp *shardProcessor) CommitBlock( chainHandler.SetCurrentBlockHeaderHash(headerHash) sp.indexBlockIfNeeded(bodyHandler, headerHandler, lastBlockHeader) - headerMeta, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + lastCrossNotarizedHeader, _, err := sp.blockTracker.GetLastCrossNotarizedHeader(sharding.MetachainShardId) if err != nil { return err } + saveMetricsForACommittedBlock( sp.appStatusHandler, sp.specialAddressHandler.IsCurrentNodeInConsensus(), display.DisplayByteSlice(headerHash), highestFinalBlockNonce, - headerMeta.GetNonce(), + lastCrossNotarizedHeader, ) headerInfo := bootstrapStorage.BootstrapHeaderInfo{ @@ -766,11 +839,13 @@ func (sp *shardProcessor) CommitBlock( Hash: headerHash, } - sp.mutProcessedMiniBlocks.RLock() - processedMiniBlocks := process.ConvertProcessedMiniBlocksMapToSlice(sp.processedMiniBlocks) - sp.mutProcessedMiniBlocks.RUnlock() + processedMiniBlocks := sp.processedMiniBlocks.ConvertProcessedMiniBlocksMapToSlice() + + if len(selfNotarizedHeaders) > 0 { + sp.lowestNonceInSelfNotarizedHeaders = selfNotarizedHeaders[0].GetNonce() + } - sp.prepareDataForBootStorer(headerInfo, header.Round, finalHeaders, finalHeadersHashes, processedMiniBlocks) + sp.prepareDataForBootStorer(headerInfo, header.Round, selfNotarizedHeaders, selfNotarizedHeadersHashes, sp.lowestNonceInSelfNotarizedHeaders, processedMiniBlocks) go sp.cleanTxsPools() @@ -783,6 +858,7 @@ func (sp *shardProcessor) CommitBlock( sp.shardCoordinator.SelfId(), sp.dataPool, sp.appStatusHandler, + sp.blockTracker, ) sp.blockSizeThrottler.Succeed(header.Round) @@ -790,24 +866,139 @@ func (sp *shardProcessor) CommitBlock( log.Debug("pools info", "headers", sp.dataPool.Headers().Len(), "headers capacity", sp.dataPool.Headers().MaxSize(), - "metablocks", sp.dataPool.MetaBlocks().Len(), - "metablocks capacity", sp.dataPool.MetaBlocks().MaxSize(), "miniblocks", sp.dataPool.MiniBlocks().Len(), "miniblocks capacity", sp.dataPool.MiniBlocks().MaxSize(), ) - go sp.cleanupPools(headersNoncesPool, headersPool, sp.dataPool.MetaBlocks()) + go sp.cleanupPools(headerHandler, headersPool) return nil } -// ApplyProcessedMiniBlocks will apply processed mini blocks -func (sp *shardProcessor) ApplyProcessedMiniBlocks(processedMiniBlocks map[string]map[string]struct{}) { - sp.mutProcessedMiniBlocks.Lock() - for metaHash, miniBlocksHashes := range processedMiniBlocks { - sp.processedMiniBlocks[metaHash] = miniBlocksHashes +func (sp *shardProcessor) updateStateStorage(finalHeaders []data.HeaderHandler) { + // TODO add pruning on metachain. Refactor the pruning mechanism (remove everything before final nonce). 
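CommitBlock above persists the processed-miniblock state through ConvertProcessedMiniBlocksMapToSlice before calling prepareDataForBootStorer, and the slice is loaded back with ConvertSliceToProcessedMiniBlocksMap on startup. A small round-trip sketch under that assumption, reusing only bootstrapStorage.MiniBlocksInMeta and the tracker methods shown in this diff:

package main

import (
	"fmt"

	"github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage"
	"github.com/ElrondNetwork/elrond-go/process/block/processedMb"
)

func main() {
	// Slice form as it would come back from the bootstrap storer.
	persisted := []bootstrapStorage.MiniBlocksInMeta{
		{
			MetaHash:         []byte("meta1"),
			MiniBlocksHashes: [][]byte{[]byte("mb1"), []byte("mb2")},
		},
	}

	// Rebuild the in-memory tracker from the persisted slice.
	tracker := processedMb.NewProcessedMiniBlocks()
	tracker.ConvertSliceToProcessedMiniBlocksMap(persisted)
	fmt.Println(tracker.IsMiniBlockProcessed("meta1", "mb1")) // true

	// And flatten it again, as CommitBlock does before storing bootstrap data.
	roundTripped := tracker.ConvertProcessedMiniBlocksMapToSlice()
	fmt.Println(len(roundTripped), len(roundTripped[0].MiniBlocksHashes)) // 1 2
}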
+ for i := range finalHeaders { + if !sp.accounts.IsPruningEnabled() { + break + } + + sp.saveState(finalHeaders[i]) + + val, errNotCritical := sp.store.Get(dataRetriever.BlockHeaderUnit, finalHeaders[i].GetPrevHash()) + if errNotCritical != nil { + log.Debug(errNotCritical.Error()) + continue + } + + var prevHeader block.Header + errNotCritical = sp.marshalizer.Unmarshal(&prevHeader, val) + if errNotCritical != nil { + log.Debug(errNotCritical.Error()) + continue + } + + rootHash := prevHeader.GetRootHash() + if rootHash == nil { + continue + } + + log.Trace("final header will be pruned", "root hash", rootHash) + errNotCritical = sp.accounts.PruneTrie(rootHash) + if errNotCritical != nil { + log.Debug(errNotCritical.Error()) + } + + sp.accounts.CancelPrune(finalHeaders[i].GetRootHash()) + } +} + +func (sp *shardProcessor) saveState(finalHeader data.HeaderHandler) { + if finalHeader.IsStartOfEpochBlock() { + sp.accounts.SnapshotState(finalHeader.GetRootHash()) + return + } + + // TODO generate checkpoint on a trigger + if finalHeader.GetRound()%uint64(sp.stateCheckpointModulus) == 0 { + sp.accounts.SetStateCheckpoint(finalHeader.GetRootHash()) + } +} + +func (sp *shardProcessor) checkEpochCorrectnessCrossChain(blockChain data.ChainHandler) error { + currentHeader := blockChain.GetCurrentBlockHeader() + if check.IfNil(currentHeader) { + return nil + } + + shouldRevertChain := false + nonce := currentHeader.GetNonce() + shouldEnterNewEpochRound := sp.epochStartTrigger.EpochFinalityAttestingRound() + process.EpochChangeGracePeriod + + for round := currentHeader.GetRound(); round > shouldEnterNewEpochRound && currentHeader.GetEpoch() != sp.epochStartTrigger.Epoch(); round = currentHeader.GetRound() { + shouldRevertChain = true + prevHeader, _, err := process.GetHeaderFromStorageWithNonce( + currentHeader.GetNonce()-1, + sp.shardCoordinator.SelfId(), + sp.store, + sp.uint64Converter, + sp.marshalizer, + ) + if err != nil { + return err + } + + nonce = currentHeader.GetNonce() + currentHeader = prevHeader + } + + if shouldRevertChain { + log.Debug("blockchain is wrongly constructed", + "reverted to nonce", nonce) + + sp.forkDetector.SetRollBackNonce(nonce) + return process.ErrEpochDoesNotMatch + } + + return nil +} + +func (sp *shardProcessor) getLastSelfNotarizedHeader() (data.HeaderHandler, []byte) { + hash := sp.forkDetector.GetHighestFinalBlockHash() + header, err := process.GetShardHeader(hash, sp.dataPool.Headers(), sp.marshalizer, sp.store) + if err != nil { + log.Warn("getLastSelfNotarizedHeader.GetShardHeader", "error", err.Error()) + return nil, nil + } + + return header, hash +} + +func (sp *shardProcessor) saveLastNotarizedHeader(shardId uint32, processedHdrs []data.HeaderHandler) error { + lastCrossNotarizedHeader, lastCrossNotarizedHeaderHash, err := sp.blockTracker.GetLastCrossNotarizedHeader(shardId) + if err != nil { + return err + } + + lenProcessedHdrs := len(processedHdrs) + if lenProcessedHdrs > 0 { + if lastCrossNotarizedHeader.GetNonce() < processedHdrs[lenProcessedHdrs-1].GetNonce() { + lastCrossNotarizedHeader = processedHdrs[lenProcessedHdrs-1] + lastCrossNotarizedHeaderHash, err = core.CalculateHash(sp.marshalizer, sp.hasher, lastCrossNotarizedHeader) + if err != nil { + return err + } + } } - sp.mutProcessedMiniBlocks.Unlock() + + sp.blockTracker.AddCrossNotarizedHeader(shardId, lastCrossNotarizedHeader, lastCrossNotarizedHeaderHash) + DisplayLastNotarized(sp.marshalizer, sp.hasher, lastCrossNotarizedHeader, shardId) + + return nil +} + +// ApplyProcessedMiniBlocks 
will apply processed mini blocks +func (sp *shardProcessor) ApplyProcessedMiniBlocks(processedMiniBlocks *processedMb.ProcessedMiniBlockTracker) { + sp.processedMiniBlocks = processedMiniBlocks } func (sp *shardProcessor) cleanTxsPools() { @@ -830,8 +1021,6 @@ func (sp *shardProcessor) getHighestHdrForOwnShardFromMetachain( processedHdrs []data.HeaderHandler, ) ([]data.HeaderHandler, [][]byte, error) { - process.SortHeadersByNonce(processedHdrs) - ownShIdHdrs := make([]data.HeaderHandler, 0, len(processedHdrs)) for i := 0; i < len(processedHdrs); i++ { @@ -871,7 +1060,7 @@ func (sp *shardProcessor) getHighestHdrForShardFromMetachain(shardId uint32, hdr ownHdr, err := process.GetShardHeader(shardInfo.HeaderHash, sp.dataPool.Headers(), sp.marshalizer, sp.store) if err != nil { - go sp.onRequestHeaderHandler(shardInfo.ShardID, shardInfo.HeaderHash) + go sp.requestHandler.RequestShardHeader(shardInfo.ShardID, shardInfo.HeaderHash) log.Debug("requested missing shard header", "hash", shardInfo.HeaderHash, @@ -946,7 +1135,7 @@ func (sp *shardProcessor) addProcessedCrossMiniBlocksFromHeader(header *block.He continue } - sp.addProcessedMiniBlock(metaBlockHash, miniBlockHash) + sp.processedMiniBlocks.AddMiniBlockHash(string(metaBlockHash), string(miniBlockHash)) delete(miniBlockHashes, key) } @@ -1009,7 +1198,7 @@ func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromMiniBlockHashes( crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) for hash := range crossMiniBlockHashes { - processedCrossMiniBlocksHashes[hash] = sp.isMiniBlockProcessed([]byte(metaBlockHash), []byte(hash)) + processedCrossMiniBlocksHashes[hash] = sp.processedMiniBlocks.IsMiniBlockProcessed(metaBlockHash, hash) } for key, miniBlockHash := range miniBlockHashes { @@ -1047,7 +1236,7 @@ func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromMiniBlockHashes( } func (sp *shardProcessor) removeProcessedMetaBlocksFromPool(processedMetaHdrs []data.HeaderHandler) error { - lastNotarizedMetaHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + lastCrossNotarizedHeader, _, err := sp.blockTracker.GetLastCrossNotarizedHeader(sharding.MetachainShardId) if err != nil { return err } @@ -1058,7 +1247,7 @@ func (sp *shardProcessor) removeProcessedMetaBlocksFromPool(processedMetaHdrs [] hdr := processedMetaHdrs[i] // remove process finished - if hdr.GetNonce() > lastNotarizedMetaHdr.GetNonce() { + if hdr.GetNonce() > lastCrossNotarizedHeader.GetNonce() { continue } @@ -1083,9 +1272,8 @@ func (sp *shardProcessor) removeProcessedMetaBlocksFromPool(processedMetaHdrs [] continue } - sp.dataPool.MetaBlocks().Remove(headerHash) - sp.dataPool.HeadersNonces().Remove(hdr.GetNonce(), sharding.MetachainShardId) - sp.removeAllProcessedMiniBlocks(headerHash) + sp.dataPool.Headers().RemoveHeaderByHash(headerHash) + sp.processedMiniBlocks.RemoveMetaBlockHash(string(headerHash)) log.Trace("metaBlock has been processed completely and removed from pool", "round", hdr.GetRound(), @@ -1108,18 +1296,13 @@ func (sp *shardProcessor) removeProcessedMetaBlocksFromPool(processedMetaHdrs [] // receivedMetaBlock is a callback function when a new metablock was received // upon receiving, it parses the new metablock and requests miniblocks and transactions // which destination is the current shard -func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { - metaBlocksPool := sp.dataPool.MetaBlocks() +func (sp *shardProcessor) receivedMetaBlock(headerHandler data.HeaderHandler, metaBlockHash []byte) { + 
metaBlocksPool := sp.dataPool.Headers() if metaBlocksPool == nil { return } - obj, ok := metaBlocksPool.Peek(metaBlockHash) - if !ok { - return - } - - metaBlock, ok := obj.(*block.MetaBlock) + metaBlock, ok := headerHandler.(*block.MetaBlock) if !ok { return } @@ -1150,8 +1333,7 @@ func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { sp.hdrsForCurrBlock.missingFinalityAttestingHdrs = sp.requestMissingFinalityAttestingHeaders( sharding.MetachainShardId, sp.metaBlockFinality, - sp.getMetaHeaderFromPoolWithNonce, - sp.dataPool.MetaBlocks()) + ) if sp.hdrsForCurrBlock.missingFinalityAttestingHdrs == 0 { log.Debug("received all missing finality attesting meta headers") } @@ -1169,31 +1351,33 @@ func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() } - sp.setLastHdrForShard(metaBlock.GetShardID(), metaBlock) - - if sp.isHeaderOutOfRange(metaBlock, metaBlocksPool) { - metaBlocksPool.Remove(metaBlockHash) - - headersNoncesPool := sp.dataPool.HeadersNonces() - if headersNoncesPool != nil { - headersNoncesPool.Remove(metaBlock.GetNonce(), metaBlock.GetShardID()) - } + if sp.isHeaderOutOfRange(metaBlock, metaBlocksPool.MaxSize()) { + metaBlocksPool.RemoveHeaderByHash(metaBlockHash) return } - lastNotarizedHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + lastCrossNotarizedHeader, _, err := sp.blockTracker.GetLastCrossNotarizedHeader(sharding.MetachainShardId) if err != nil { + log.Debug("receivedMetaBlock", + "shard", sharding.MetachainShardId, + "error", err.Error()) return } - if metaBlock.GetNonce() <= lastNotarizedHdr.GetNonce() { + + if metaBlock.GetNonce() <= lastCrossNotarizedHeader.GetNonce() { return } - if metaBlock.GetRound() <= lastNotarizedHdr.GetRound() { + if metaBlock.GetRound() <= lastCrossNotarizedHeader.GetRound() { return } - isMetaBlockOutOfRange := metaBlock.GetNonce() > lastNotarizedHdr.GetNonce()+process.MaxHeadersToRequestInAdvance + sp.epochStartTrigger.ReceivedHeader(metaBlock) + if sp.epochStartTrigger.IsEpochStart() { + sp.chRcvEpochStart <- true + } + + isMetaBlockOutOfRange := metaBlock.GetNonce() > lastCrossNotarizedHeader.GetNonce()+process.MaxHeadersToRequestInAdvance if isMetaBlockOutOfRange { return } @@ -1213,15 +1397,14 @@ func (sp *shardProcessor) requestMetaHeaders(shardHeader *block.Header) (uint32, sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() for _, hash := range missingHeadersHashes { sp.hdrsForCurrBlock.hdrHashAndInfo[string(hash)] = &hdrInfo{hdr: nil, usedInBlock: true} - go sp.onRequestHeaderHandler(sharding.MetachainShardId, hash) + go sp.requestHandler.RequestMetaHeader(hash) } if sp.hdrsForCurrBlock.missingHdrs == 0 { sp.hdrsForCurrBlock.missingFinalityAttestingHdrs = sp.requestMissingFinalityAttestingHeaders( sharding.MetachainShardId, sp.metaBlockFinality, - sp.getMetaHeaderFromPoolWithNonce, - sp.dataPool.MetaBlocks()) + ) } requestedHdrs := sp.hdrsForCurrBlock.missingHdrs @@ -1238,7 +1421,7 @@ func (sp *shardProcessor) computeMissingAndExistingMetaHeaders(header *block.Hea for i := 0; i < len(header.MetaBlockHashes); i++ { hdr, err := process.GetMetaHeaderFromPool( header.MetaBlockHashes[i], - sp.dataPool.MetaBlocks()) + sp.dataPool.Headers()) if err != nil { missingHeadersHashes = append(missingHeadersHashes, header.MetaBlockHashes[i]) @@ -1274,7 +1457,7 @@ func (sp *shardProcessor) verifyCrossShardMiniBlockDstMe(header *block.Header) e } func (sp *shardProcessor) getAllMiniBlockDstMeFromMeta(header *block.Header) (map[string][]byte, error) { - lastHdr, err := 
sp.getLastNotarizedHdr(sharding.MetachainShardId) + lastCrossNotarizedHeader, _, err := sp.blockTracker.GetLastCrossNotarizedHeader(sharding.MetachainShardId) if err != nil { return nil, err } @@ -1294,10 +1477,10 @@ func (sp *shardProcessor) getAllMiniBlockDstMeFromMeta(header *block.Header) (ma if metaBlock.GetRound() > header.Round { continue } - if metaBlock.GetRound() <= lastHdr.GetRound() { + if metaBlock.GetRound() <= lastCrossNotarizedHeader.GetRound() { continue } - if metaBlock.GetNonce() <= lastHdr.GetNonce() { + if metaBlock.GetNonce() <= lastCrossNotarizedHeader.GetNonce() { continue } @@ -1311,93 +1494,9 @@ func (sp *shardProcessor) getAllMiniBlockDstMeFromMeta(header *block.Header) (ma return miniBlockMetaHashes, nil } -func (sp *shardProcessor) getOrderedMetaBlocks(round uint64) ([]*hashAndHdr, error) { - metaBlocksPool := sp.dataPool.MetaBlocks() - if metaBlocksPool == nil { - return nil, process.ErrNilMetaBlocksPool - } - - lastHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) - if err != nil { - return nil, err - } - - orderedMetaBlocks := make([]*hashAndHdr, 0) - for _, key := range metaBlocksPool.Keys() { - val, _ := metaBlocksPool.Peek(key) - if val == nil { - continue - } - - hdr, ok := val.(*block.MetaBlock) - if !ok { - continue - } - - if hdr.GetRound() > round { - continue - } - if hdr.GetRound() <= lastHdr.GetRound() { - continue - } - if hdr.GetNonce() <= lastHdr.GetNonce() { - continue - } - - orderedMetaBlocks = append(orderedMetaBlocks, &hashAndHdr{hdr: hdr, hash: key}) - } - - if len(orderedMetaBlocks) > 1 { - sort.Slice(orderedMetaBlocks, func(i, j int) bool { - return orderedMetaBlocks[i].hdr.GetNonce() < orderedMetaBlocks[j].hdr.GetNonce() - }) - } - - return orderedMetaBlocks, nil -} - -// isMetaHeaderFinal verifies if meta is trully final, in order to not do rollbacks -func (sp *shardProcessor) isMetaHeaderFinal(currHdr data.HeaderHandler, sortedHdrs []*hashAndHdr, startPos int) bool { - if currHdr == nil || currHdr.IsInterfaceNil() { - return false - } - if sortedHdrs == nil { - return false - } - - // verify if there are "K" block after current to make this one final - lastVerifiedHdr := currHdr - nextBlocksVerified := uint32(0) - - for i := startPos; i < len(sortedHdrs); i++ { - if nextBlocksVerified >= sp.metaBlockFinality { - return true - } - - // found a header with the next nonce - tmpHdr := sortedHdrs[i].hdr - if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { - err := sp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) - if err != nil { - continue - } - - lastVerifiedHdr = tmpHdr - nextBlocksVerified += 1 - } - } - - if nextBlocksVerified >= sp.metaBlockFinality { - return true - } - - return false -} - // full verification through metachain header func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( maxItemsInBlock uint32, - round uint64, haveTime func() bool, ) (block.MiniBlockSlice, uint32, uint32, error) { @@ -1405,7 +1504,11 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( txsAdded := uint32(0) hdrsAdded := uint32(0) - orderedMetaBlocks, err := sp.getOrderedMetaBlocks(round) + sw := core.NewStopWatch() + sw.Start("ComputeLongestMetaChainFromLastNotarized") + orderedMetaBlocks, orderedMetaBlocksHashes, err := sp.blockTracker.ComputeLongestMetaChainFromLastNotarized() + sw.Stop("ComputeLongestMetaChainFromLastNotarized") + log.Debug("measurements ComputeLongestMetaChainFromLastNotarized", sw.GetMeasurements()...) 
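// ----------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the block tracker's
// ComputeLongestMetaChainFromLastNotarized call used above replaces the removed
// getOrderedMetaBlocks/isMetaHeaderFinal pair. The toy function below shows one
// way such a chain can be assembled - walk forward from the last cross-notarized
// header, linking candidates by nonce and previous hash and stopping at the
// first gap. It is a conceptual model only; the real tracker's algorithm and
// types are not reproduced here.
// ----------------------------------------------------------------------------
package chainsketch

import "bytes"

type metaHeader struct {
	nonce    uint64
	hash     []byte
	prevHash []byte
}

// longestChainFrom walks the candidate headers (indexed by nonce) starting right
// after lastNotarized and returns the longest contiguous, hash-linked sequence.
func longestChainFrom(lastNotarized metaHeader, byNonce map[uint64][]metaHeader) []metaHeader {
	chain := make([]metaHeader, 0)
	prev := lastNotarized

	for {
		candidates := byNonce[prev.nonce+1]
		found := false
		for _, candidate := range candidates {
			// a valid link must reference the hash of the previous header in the chain
			if bytes.Equal(candidate.prevHash, prev.hash) {
				chain = append(chain, candidate)
				prev = candidate
				found = true
				break
			}
		}
		if !found {
			break // the first gap ends the longest chain
		}
	}

	return chain
}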
if err != nil { return nil, 0, 0, err } @@ -1414,7 +1517,7 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( "num metablocks", len(orderedMetaBlocks), ) - lastMetaHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + lastMetaHdr, _, err := sp.blockTracker.GetLastCrossNotarizedHeader(sharding.MetachainShardId) if err != nil { return nil, 0, 0, err } @@ -1444,25 +1547,18 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( break } - hdr, ok := orderedMetaBlocks[i].hdr.(*block.MetaBlock) - if !ok { - continue - } - - err = sp.isHdrConstructionValid(hdr, lastMetaHdr) - if err != nil { - continue - } - - isFinal := sp.isMetaHeaderFinal(hdr, orderedMetaBlocks, i+1) - if !isFinal { - continue + currMetaHdr := orderedMetaBlocks[i] + if currMetaHdr.GetNonce() > lastMetaHdr.GetNonce()+1 { + log.Debug("skip searching", + "last meta hdr nonce", lastMetaHdr.GetNonce(), + "curr meta hdr nonce", currMetaHdr.GetNonce()) + break } - if len(hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId())) == 0 { - sp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedMetaBlocks[i].hash)] = &hdrInfo{hdr: hdr, usedInBlock: true} + if len(currMetaHdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId())) == 0 { + sp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedMetaBlocksHashes[i])] = &hdrInfo{hdr: currMetaHdr, usedInBlock: true} hdrsAdded++ - lastMetaHdr = hdr + lastMetaHdr = currMetaHdr continue } @@ -1478,9 +1574,9 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( uint32(len(miniBlocks))) if maxTxSpaceRemained > 0 && maxMbSpaceRemained > 0 { - processedMiniBlocksHashes := sp.getProcessedMiniBlocksHashes(orderedMetaBlocks[i].hash) + processedMiniBlocksHashes := sp.processedMiniBlocks.GetProcessedMiniBlocksHashes(string(orderedMetaBlocksHashes[i])) currMBProcessed, currTxsAdded, hdrProcessFinished := sp.txCoordinator.CreateMbsAndProcessCrossShardTransactionsDstMe( - hdr, + currMetaHdr, processedMiniBlocksHashes, uint32(maxTxSpaceRemained), uint32(maxMbSpaceRemained), @@ -1491,25 +1587,46 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( txsAdded = txsAdded + currTxsAdded if currTxsAdded > 0 { - sp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedMetaBlocks[i].hash)] = &hdrInfo{hdr: hdr, usedInBlock: true} + sp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedMetaBlocksHashes[i])] = &hdrInfo{hdr: currMetaHdr, usedInBlock: true} hdrsAdded++ } if !hdrProcessFinished { + log.Debug("meta block cannot be fully processed", + "round", currMetaHdr.GetRound(), + "nonce", currMetaHdr.GetNonce(), + "hash", orderedMetaBlocksHashes[i]) + break } - lastMetaHdr = hdr + lastMetaHdr = currMetaHdr } } sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() + sp.requestMetaHeadersIfNeeded(hdrsAdded, lastMetaHdr) + return miniBlocks, txsAdded, hdrsAdded, nil } +func (sp *shardProcessor) requestMetaHeadersIfNeeded(hdrsAdded uint32, lastMetaHdr data.HeaderHandler) { + log.Debug("meta hdrs added", + "nb", hdrsAdded, + "lastMetaHdr", lastMetaHdr.GetNonce(), + ) + + if hdrsAdded == 0 { + fromNonce := lastMetaHdr.GetNonce() + 1 + toNonce := fromNonce + uint64(sp.metaBlockFinality) + for nonce := fromNonce; nonce <= toNonce; nonce++ { + go sp.requestHandler.RequestMetaHeaderByNonce(nonce) + } + } +} + func (sp *shardProcessor) createMiniBlocks( maxItemsInBlock uint32, - round uint64, haveTime func() bool, ) (block.Body, error) { @@ -1530,7 +1647,7 @@ func (sp *shardProcessor) createMiniBlocks( } startTime := time.Now() - destMeMiniBlocks, nbTxs, nbHdrs, err := 
sp.createAndProcessCrossMiniBlocksDstMe(maxItemsInBlock, round, haveTime) + destMeMiniBlocks, nbTxs, nbHdrs, err := sp.createAndProcessCrossMiniBlocksDstMe(maxItemsInBlock, haveTime) elapsedTime := time.Since(startTime) log.Debug("elapsed time to create mbs to me", "time [s]", elapsedTime, @@ -1595,48 +1712,71 @@ func (sp *shardProcessor) createMiniBlocks( } // ApplyBodyToHeader creates a miniblock header list given a block body -func (sp *shardProcessor) ApplyBodyToHeader(hdr data.HeaderHandler, bodyHandler data.BodyHandler) error { - log.Trace("started creating block header", - "round", hdr.GetRound(), - ) +func (sp *shardProcessor) ApplyBodyToHeader(hdr data.HeaderHandler, bodyHandler data.BodyHandler) (data.BodyHandler, error) { + sw := core.NewStopWatch() + sw.Start("ApplyBodyToHeader") + defer func() { + sw.Stop("ApplyBodyToHeader") + + log.Debug("measurements ApplyBodyToHeader", sw.GetMeasurements()...) + }() shardHeader, ok := hdr.(*block.Header) if !ok { - return process.ErrWrongTypeAssertion + return nil, process.ErrWrongTypeAssertion } shardHeader.MiniBlockHeaders = make([]block.MiniBlockHeader, 0) shardHeader.RootHash = sp.getRootHash() - shardHeader.ShardId = sp.shardCoordinator.SelfId() defer func() { go sp.checkAndRequestIfMetaHeadersMissing(hdr.GetRound()) }() - if bodyHandler == nil || bodyHandler.IsInterfaceNil() { - return nil + if check.IfNil(bodyHandler) { + return nil, process.ErrNilBlockBody } body, ok := bodyHandler.(block.Body) if !ok { - return process.ErrWrongTypeAssertion + return nil, process.ErrWrongTypeAssertion } - totalTxCount, miniBlockHeaders, err := sp.createMiniBlockHeaders(body) + var err error + sw.Start("CreateReceiptsHash") + shardHeader.ReceiptsHash, err = sp.txCoordinator.CreateReceiptsHash() + sw.Stop("CreateReceiptsHash") if err != nil { - return err + return nil, err + } + + newBody := deleteSelfReceiptsMiniBlocks(body) + + sw.Start("createMiniBlockHeaders") + totalTxCount, miniBlockHeaders, err := sp.createMiniBlockHeaders(newBody) + sw.Stop("createMiniBlockHeaders") + if err != nil { + return nil, err } shardHeader.MiniBlockHeaders = miniBlockHeaders shardHeader.TxCount = uint32(totalTxCount) + sw.Start("sortHeaderHashesForCurrentBlockByNonce") metaBlockHashes := sp.sortHeaderHashesForCurrentBlockByNonce(true) + sw.Stop("sortHeaderHashesForCurrentBlockByNonce") shardHeader.MetaBlockHashes = metaBlockHashes[sharding.MetachainShardId] + if sp.epochStartTrigger.IsEpochStart() { + shardHeader.EpochStartMetaHash = sp.epochStartTrigger.EpochStartMetaHdrHash() + } + sp.appStatusHandler.SetUInt64Value(core.MetricNumTxInBlock, uint64(totalTxCount)) sp.appStatusHandler.SetUInt64Value(core.MetricNumMiniBlocks, uint64(len(body))) + sw.Start("validatorStatisticsProcessor.RootHash") rootHash, err := sp.validatorStatisticsProcessor.RootHash() + sw.Stop("validatorStatisticsProcessor.RootHash") if err != nil { - return err + return nil, err } shardHeader.ValidatorStatsRootHash = rootHash @@ -1645,7 +1785,7 @@ func (sp *shardProcessor) ApplyBodyToHeader(hdr data.HeaderHandler, bodyHandler hdr.GetRound(), core.MaxUint32(hdr.ItemsInBody(), hdr.ItemsInHeader())) - return nil + return newBody, nil } func (sp *shardProcessor) waitForMetaHdrHashes(waitTime time.Duration) error { @@ -1723,68 +1863,7 @@ func (sp *shardProcessor) DecodeBlockHeader(dta []byte) data.HeaderHandler { // IsInterfaceNil returns true if there is no value under the interface func (sp *shardProcessor) IsInterfaceNil() bool { - if sp == nil { - return true - } - return false -} - -func (sp 
*shardProcessor) addProcessedMiniBlock(metaBlockHash []byte, miniBlockHash []byte) { - sp.mutProcessedMiniBlocks.Lock() - miniBlocksProcessed, ok := sp.processedMiniBlocks[string(metaBlockHash)] - if !ok { - miniBlocksProcessed := make(map[string]struct{}) - miniBlocksProcessed[string(miniBlockHash)] = struct{}{} - sp.processedMiniBlocks[string(metaBlockHash)] = miniBlocksProcessed - sp.mutProcessedMiniBlocks.Unlock() - return - } - - miniBlocksProcessed[string(miniBlockHash)] = struct{}{} - sp.mutProcessedMiniBlocks.Unlock() -} - -func (sp *shardProcessor) removeProcessedMiniBlock(miniBlockHash []byte) { - sp.mutProcessedMiniBlocks.Lock() - for metaHash, miniBlocksProcessed := range sp.processedMiniBlocks { - _, isProcessed := miniBlocksProcessed[string(miniBlockHash)] - if isProcessed { - delete(miniBlocksProcessed, string(miniBlockHash)) - } - - if len(miniBlocksProcessed) == 0 { - delete(sp.processedMiniBlocks, metaHash) - } - } - sp.mutProcessedMiniBlocks.Unlock() -} - -func (sp *shardProcessor) removeAllProcessedMiniBlocks(metaBlockHash []byte) { - sp.mutProcessedMiniBlocks.Lock() - delete(sp.processedMiniBlocks, string(metaBlockHash)) - sp.mutProcessedMiniBlocks.Unlock() -} - -func (sp *shardProcessor) getProcessedMiniBlocksHashes(metaBlockHash []byte) map[string]struct{} { - sp.mutProcessedMiniBlocks.RLock() - processedMiniBlocksHashes := sp.processedMiniBlocks[string(metaBlockHash)] - sp.mutProcessedMiniBlocks.RUnlock() - - return processedMiniBlocksHashes -} - -func (sp *shardProcessor) isMiniBlockProcessed(metaBlockHash []byte, miniBlockHash []byte) bool { - sp.mutProcessedMiniBlocks.RLock() - miniBlocksProcessed, ok := sp.processedMiniBlocks[string(metaBlockHash)] - if !ok { - sp.mutProcessedMiniBlocks.RUnlock() - return false - } - - _, isProcessed := miniBlocksProcessed[string(miniBlockHash)] - sp.mutProcessedMiniBlocks.RUnlock() - - return isProcessed + return sp == nil } func (sp *shardProcessor) getMaxMiniBlocksSpaceRemained( @@ -1799,19 +1878,6 @@ func (sp *shardProcessor) getMaxMiniBlocksSpaceRemained( return maxMbSpaceRemained } -func (sp *shardProcessor) getMetaHeaderFromPoolWithNonce( - nonce uint64, - _ uint32, -) (data.HeaderHandler, []byte, error) { - - metaHeader, metaHeaderHash, err := process.GetMetaHeaderFromPoolWithNonce( - nonce, - sp.dataPool.MetaBlocks(), - sp.dataPool.HeadersNonces()) - - return metaHeader, metaHeaderHash, err -} - func (sp *shardProcessor) updatePeerStateForFinalMetaHeaders(finalHeaders []data.HeaderHandler) error { for _, header := range finalHeaders { _, err := sp.validatorStatisticsProcessor.UpdatePeerState(header) @@ -1842,6 +1908,7 @@ func (sp *shardProcessor) checkValidatorStatisticsRootHash(currentHeader *block. 
return nil } +// GetBlockBodyFromPool returns block body from pool for a given header func (sp *shardProcessor) GetBlockBodyFromPool(headerHandler data.HeaderHandler) (data.BodyHandler, error) { miniBlockPool := sp.dataPool.MiniBlocks() if miniBlockPool == nil { diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index cefa9d44b6e..da17fb1e7c1 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/data" @@ -18,10 +19,11 @@ import ( "github.com/ElrondNetwork/elrond-go/data/blockchain" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool/headersCache" "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool" "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" @@ -38,25 +40,23 @@ import ( const MaxGasLimitPerBlock = uint64(100000) func createTestShardDataPool() dataRetriever.PoolsHolder { - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1}) + txPool, _ := txpool.NewShardedTxPool(storageUnit.CacheConfig{Size: 100000, Shards: 1}) uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1}) rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 300, Type: storageUnit.LRUCache, Shards: 1}) - cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache, Shards: 1} - hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} - hdrNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - hdrNonces, _ := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) + hdrPool, _ := headersCache.NewHeadersPool(config.HeadersPoolConfig{MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 100}) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} + cacherCfg := storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} txBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} peerChangeBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} - metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) + + cacherCfg = storageUnit.CacheConfig{Size: 50000, Type: storageUnit.LRUCache, Shards: 1} + trieNodes, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) currTxs, _ := dataPool.NewCurrentBlockPool() @@ -65,10 +65,9 @@ func createTestShardDataPool() dataRetriever.PoolsHolder { uTxPool, rewardsTxPool, 
hdrPool, - hdrNonces, txBlockBody, peerChangeBlockBody, - metaBlocks, + trieNodes, currTxs, ) @@ -135,7 +134,6 @@ func CreateMockArgumentsMultiShard() blproc.ArgShardProcessor { arguments.DataPool = initDataPool([]byte("tx_hash1")) arguments.Accounts = initAccountsMock() arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(3) - arguments.StartHeaders = createGenesisBlocks(arguments.ShardCoordinator) return arguments } @@ -460,7 +458,7 @@ func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T tdp, &mock.AddressConverterMock{}, accounts, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{ ProcessTransactionCalled: func(transaction *transaction.Transaction) error { return process.ErrHigherNonceInTransaction @@ -496,10 +494,12 @@ func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T container, _ := factory.Create() tc, err := coordinator.NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(3), accounts, tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, container, &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{ @@ -678,7 +678,7 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR tdp, &mock.AddressConverterMock{}, accounts, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, tpm, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -711,10 +711,12 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR totalGasConsumed := uint64(0) tc, _ := coordinator.NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(3), accounts, tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, container, &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{ @@ -1019,7 +1021,7 @@ func TestShardProcessor_ProcessBlockCrossShardWithMetaShouldPass(t *testing.T) { metaBytes, _ := marshalizer.Marshal(meta) metaHash := hasher.Compute(string(metaBytes)) - tdp.MetaBlocks().Put(metaHash, meta) + tdp.Headers().AddHeader(metaHash, &meta) meta = block.MetaBlock{ Nonce: 2, @@ -1029,7 +1031,7 @@ func TestShardProcessor_ProcessBlockCrossShardWithMetaShouldPass(t *testing.T) { metaBytes, _ = marshalizer.Marshal(meta) metaHash = hasher.Compute(string(metaBytes)) - tdp.MetaBlocks().Put(metaHash, meta) + tdp.Headers().AddHeader(metaHash, &meta) // set accounts not dirty journalLen := func() int { return 0 } @@ -1158,7 +1160,7 @@ func TestShardProcessor_ProcessBlockWithMissingMetaHdrShouldErr(t *testing.T) { metaHash := hasher.Compute(string(metaBytes)) hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) - tdp.MetaBlocks().Put(metaHash, meta) + tdp.Headers().AddHeader(metaHash, &meta) meta = block.MetaBlock{ Nonce: 2, @@ -1169,7 +1171,7 @@ func TestShardProcessor_ProcessBlockWithMissingMetaHdrShouldErr(t *testing.T) { metaHash = hasher.Compute(string(metaBytes)) hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) - tdp.MetaBlocks().Put(metaHash, meta) + tdp.Headers().AddHeader(metaHash, &meta) // set accounts not dirty journalLen := func() int { return 0 } @@ -1296,7 +1298,7 @@ func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing. 
metaHash := hasher.Compute(string(metaBytes)) hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) - tdp.MetaBlocks().Put(metaHash, meta) + tdp.Headers().AddHeader(metaHash, meta) // set accounts not dirty journalLen := func() int { return 0 } @@ -1311,8 +1313,8 @@ func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing. arguments.DataPool = tdp arguments.Hasher = hasher arguments.Marshalizer = marshalizer - arguments.RequestHandler = &mock.RequestHandlerMock{ - RequestHeaderHandlerByNonceCalled: func(destShardID uint32, nonce uint64) { + arguments.RequestHandler = &mock.RequestHandlerStub{ + RequestMetaHeaderByNonceCalled: func(nonce uint64) { atomic.AddInt32(&hdrNoncesRequestCalled, 1) }, } @@ -1334,7 +1336,7 @@ func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing. metaBytes, _ = marshalizer.Marshal(meta) metaHash = hasher.Compute(string(metaBytes)) - tdp.MetaBlocks().Put(metaHash, meta) + tdp.Headers().AddHeader(metaHash, meta) sp.CheckAndRequestIfMetaHeadersMissing(2) time.Sleep(100 * time.Millisecond) @@ -1342,107 +1344,6 @@ func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing. assert.Equal(t, err, process.ErrTimeIsOut) } -//-------- isMetaHeaderFinal -func TestShardProcessor_IsMetaHeaderFinalShouldPass(t *testing.T) { - t.Parallel() - - tdp := mock.NewPoolsHolderMock() - txHash := []byte("tx_hash1") - randSeed := []byte("rand seed") - tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) - blkc := &blockchain.BlockChain{ - CurrentBlockHeader: &block.Header{ - Nonce: 1, - RandSeed: randSeed, - }, - } - rootHash := []byte("rootHash") - body := make(block.Body, 0) - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - miniblock := block.MiniBlock{ - ReceiverShardID: 0, - SenderShardID: 1, - TxHashes: txHashes, - } - body = append(body, &miniblock) - - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - - mbbytes, _ := marshalizer.Marshal(miniblock) - mbHash := hasher.Compute(string(mbbytes)) - mbHdr := block.MiniBlockHeader{ - ReceiverShardID: 0, - SenderShardID: 1, - TxCount: uint32(len(txHashes)), - Hash: mbHash, - } - mbHdrs := make([]block.MiniBlockHeader, 0) - mbHdrs = append(mbHdrs, mbHdr) - - lastHdr := blkc.GetCurrentBlockHeader() - prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) - - shardMiniBlock := block.ShardMiniBlockHeader{ - ReceiverShardID: mbHdr.ReceiverShardID, - SenderShardID: mbHdr.SenderShardID, - TxCount: mbHdr.TxCount, - Hash: mbHdr.Hash, - } - shardMiniblockHdrs := make([]block.ShardMiniBlockHeader, 0) - shardMiniblockHdrs = append(shardMiniblockHdrs, shardMiniBlock) - shardHeader := block.ShardData{ - ShardMiniBlockHeaders: shardMiniblockHdrs, - } - shardHdrs := make([]block.ShardData, 0) - shardHdrs = append(shardHdrs, shardHeader) - - meta := &block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - Round: 1, - RandSeed: randSeed, - } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) - tdp.MetaBlocks().Put(metaHash, meta) - - genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) - - err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrTimeIsOut, err) - res := sp.IsMetaHeaderFinal(&hdr, nil, 0) - 
assert.False(t, res) - res = sp.IsMetaHeaderFinal(nil, nil, 0) - assert.False(t, res) - - meta = &block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - Round: 2, - PrevHash: metaHash, - PrevRandSeed: randSeed, - } - metaBytes, _ = marshalizer.Marshal(meta) - metaHash = hasher.Compute(string(metaBytes)) - tdp.MetaBlocks().Put(metaHash, meta) - - meta = &block.MetaBlock{ - Nonce: 1, - ShardInfo: shardHdrs, - Round: 1, - RandSeed: randSeed, - } - ordered, _ := sp.GetOrderedMetaBlocks(3) - res = sp.IsMetaHeaderFinal(meta, ordered, 0) - assert.True(t, res) -} - //-------- requestMissingFinalityAttestingHeaders func TestShardProcessor_RequestMissingFinalityAttestingHeaders(t *testing.T) { t.Parallel() @@ -1521,7 +1422,7 @@ func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldPass(t *testing metaHash1 := hasher.Compute(string(metaBytes)) hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash1) - tdp.MetaBlocks().Put(metaHash1, meta1) + tdp.Headers().AddHeader(metaHash1, meta1) prevHash, _ = core.CalculateHash(marshalizer, hasher, meta1) meta2 := &block.MetaBlock{ @@ -1533,12 +1434,11 @@ func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldPass(t *testing metaBytes, _ = marshalizer.Marshal(meta2) metaHash2 := hasher.Compute(string(metaBytes)) - tdp.MetaBlocks().Put(metaHash2, meta2) + tdp.Headers().AddHeader(metaHash2, meta2) arguments := CreateMockArgumentsMultiShard() arguments.DataPool = tdp arguments.Hasher = hasher arguments.Marshalizer = marshalizer - arguments.StartHeaders = genesisBlocks sp, _ := blproc.NewShardProcessor(arguments) hdr.Round = 4 @@ -1667,13 +1567,23 @@ func TestShardProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) arguments.Store = store arguments.Accounts = accounts arguments.ForkDetector = &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadereHashes [][]byte, isNotarizedShardStuck bool) error { + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { return nil }, GetHighestFinalBlockNonceCalled: func() uint64 { return 0 }, + GetHighestFinalBlockHashCalled: func() []byte { + return nil + }, + } + blockTrackerMock := &mock.BlockTrackerMock{ + GetCrossNotarizedHeaderCalled: func(shardID uint32, offset uint64) (data.HeaderHandler, []byte, error) { + return &block.Header{}, []byte("hash"), nil + }, } + _ = blockTrackerMock.InitCrossNotarizedHeaders(createGenesisBlocks(mock.NewOneShardCoordinatorMock())) + arguments.BlockTracker = blockTrackerMock sp, _ := blproc.NewShardProcessor(arguments) blkc, _ := blockchain.NewBlockChain( @@ -1731,13 +1641,23 @@ func TestShardProcessor_CommitBlockStorageFailsForBodyShouldWork(t *testing.T) { arguments.Store = store arguments.Accounts = accounts arguments.ForkDetector = &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error { + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { return nil }, GetHighestFinalBlockNonceCalled: func() uint64 { return 0 }, + GetHighestFinalBlockHashCalled: func() []byte { + return nil + }, } + blockTrackerMock := 
&mock.BlockTrackerMock{ + GetCrossNotarizedHeaderCalled: func(shardID uint32, offset uint64) (data.HeaderHandler, []byte, error) { + return &block.Header{}, []byte("hash"), nil + }, + } + _ = blockTrackerMock.InitCrossNotarizedHeaders(createGenesisBlocks(arguments.ShardCoordinator)) + arguments.BlockTracker = blockTrackerMock sp, err := blproc.NewShardProcessor(arguments) assert.Nil(t, err) @@ -1755,43 +1675,6 @@ func TestShardProcessor_CommitBlockStorageFailsForBodyShouldWork(t *testing.T) { assert.True(t, wasCalled) } -func TestShardProcessor_CommitBlockNilNoncesDataPoolShouldErr(t *testing.T) { - t.Parallel() - tdp := initDataPool([]byte("tx_hash1")) - rootHash := []byte("root hash to be tested") - accounts := &mock.AccountsStub{ - RootHashCalled: func() ([]byte, error) { - return rootHash, nil - }, - RevertToSnapshotCalled: func(snapshot int) error { - return nil - }, - } - hdr := &block.Header{ - Nonce: 1, - Round: 1, - PubKeysBitmap: []byte("0100101"), - Signature: []byte("signature"), - RootHash: rootHash, - } - body := make(block.Body, 0) - store := initStore() - - arguments := CreateMockArgumentsMultiShard() - arguments.DataPool = tdp - arguments.Store = store - arguments.Accounts = accounts - sp, _ := blproc.NewShardProcessor(arguments) - - tdp.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - return nil - } - blkc := createTestBlockchain() - err := sp.CommitBlock(blkc, hdr, body) - - assert.Equal(t, process.ErrNilHeadersNoncesDataPool, err) -} - func TestShardProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { t.Parallel() tdp := initDataPool([]byte("tx_hash1")) @@ -1849,7 +1732,7 @@ func TestShardProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { }, } fd := &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error { + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { return nil }, } @@ -1867,7 +1750,7 @@ func TestShardProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { tdp, &mock.AddressConverterMock{}, initAccountsMock(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -1880,10 +1763,12 @@ func TestShardProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { container, _ := factory.Create() tc, err := coordinator.NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(3), initAccountsMock(), tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, container, &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -1955,7 +1840,7 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { } forkDetectorAddCalled := false fd := &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error { + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { if header == hdr { forkDetectorAddCalled = true return nil @@ -1966,6 +1851,9 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t 
*testing.T) { GetHighestFinalBlockNonceCalled: func() uint64 { return 0 }, + GetHighestFinalBlockHashCalled: func() []byte { + return nil + }, } hasher := &mock.HasherStub{} hasher.ComputeCalled = func(s string) []byte { @@ -1979,6 +1867,13 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { arguments.Hasher = hasher arguments.Accounts = accounts arguments.ForkDetector = fd + blockTrackerMock := &mock.BlockTrackerMock{ + GetCrossNotarizedHeaderCalled: func(shardID uint32, offset uint64) (data.HeaderHandler, []byte, error) { + return &block.Header{}, []byte("hash"), nil + }, + } + _ = blockTrackerMock.InitCrossNotarizedHeaders(createGenesisBlocks(mock.NewOneShardCoordinatorMock())) + arguments.BlockTracker = blockTrackerMock sp, _ := blproc.NewShardProcessor(arguments) blkc := createTestBlockchain() @@ -2048,12 +1943,15 @@ func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { }, } fd := &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error { + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { return nil }, GetHighestFinalBlockNonceCalled: func() uint64 { return 0 }, + GetHighestFinalBlockHashCalled: func() []byte { + return nil + }, } hasher := &mock.HasherStub{} hasher.ComputeCalled = func(s string) []byte { @@ -2099,6 +1997,13 @@ func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { } }, } + blockTrackerMock := &mock.BlockTrackerMock{ + GetCrossNotarizedHeaderCalled: func(shardID uint32, offset uint64) (data.HeaderHandler, []byte, error) { + return &block.Header{}, []byte("hash"), nil + }, + } + _ = blockTrackerMock.InitCrossNotarizedHeaders(createGenesisBlocks(mock.NewOneShardCoordinatorMock())) + arguments.BlockTracker = blockTrackerMock sp, _ := blproc.NewShardProcessor(arguments) @@ -2139,7 +2044,7 @@ func TestShardProcessor_CreateTxBlockBodyWithDirtyAccStateShouldErr(t *testing.T sp, _ := blproc.NewShardProcessor(arguments) - bl, err := sp.CreateBlockBody(&block.Header{}, func() bool { return true }) + bl, err := sp.CreateBlockBody(&block.Header{PrevRandSeed: []byte("randSeed")}, func() bool { return true }) // nil block assert.Nil(t, bl) // error @@ -2166,7 +2071,7 @@ func TestShardProcessor_CreateTxBlockBodyWithNoTimeShouldEmptyBlock(t *testing.T haveTime := func() bool { return false } - bl, err := sp.CreateBlockBody(&block.Header{}, haveTime) + bl, err := sp.CreateBlockBody(&block.Header{PrevRandSeed: []byte("randSeed")}, haveTime) // no error assert.Equal(t, process.ErrTimeIsOut, err) // no miniblocks @@ -2191,7 +2096,7 @@ func TestShardProcessor_CreateTxBlockBodyOK(t *testing.T) { } sp, _ := blproc.NewShardProcessor(arguments) - blk, err := sp.CreateBlockBody(&block.Header{}, haveTime) + blk, err := sp.CreateBlockBody(&block.Header{PrevRandSeed: []byte("randSeed")}, haveTime) assert.NotNil(t, blk) assert.Nil(t, err) } @@ -2299,7 +2204,17 @@ func TestShardProcessor_DisplayLogInfo(t *testing.T) { sp, _ := blproc.NewShardProcessor(arguments) assert.NotNil(t, sp) hdr.PrevHash = hasher.Compute("prev hash") - sp.DisplayLogInfo(hdr, txBlock, []byte("tx_hash1"), shardCoordinator.NumberOfShards(), shardCoordinator.SelfId(), tdp, statusHandler) + sp.DisplayLogInfo(hdr, txBlock, []byte("tx_hash1"), shardCoordinator.NumberOfShards(), 
shardCoordinator.SelfId(), tdp, statusHandler, &mock.BlockTrackerMock{}) +} + +func TestBlockProcessor_ApplyBodyToHeaderNilBodyError(t *testing.T) { + t.Parallel() + arguments := CreateMockArgumentsMultiShard() + + bp, _ := blproc.NewShardProcessor(arguments) + hdr := &block.Header{} + _, err := bp.ApplyBodyToHeader(hdr, nil) + assert.Equal(t, process.ErrNilBlockBody, err) } func TestBlockProcessor_ApplyBodyToHeaderShouldNotReturnNil(t *testing.T) { @@ -2308,7 +2223,7 @@ func TestBlockProcessor_ApplyBodyToHeaderShouldNotReturnNil(t *testing.T) { bp, _ := blproc.NewShardProcessor(arguments) hdr := &block.Header{} - err := bp.ApplyBodyToHeader(hdr, nil) + _, err := bp.ApplyBodyToHeader(hdr, block.Body{}) assert.Nil(t, err) assert.NotNil(t, hdr) } @@ -2337,7 +2252,7 @@ func TestShardProcessor_ApplyBodyToHeaderShouldErrWhenMarshalizerErrors(t *testi }, } hdr := &block.Header{} - err := bp.ApplyBodyToHeader(hdr, body) + _, err := bp.ApplyBodyToHeader(hdr, body) assert.NotNil(t, err) } @@ -2364,7 +2279,7 @@ func TestShardProcessor_ApplyBodyToHeaderReturnsOK(t *testing.T) { }, } hdr := &block.Header{} - err := bp.ApplyBodyToHeader(hdr, body) + _, err := bp.ApplyBodyToHeader(hdr, body) assert.Nil(t, err) assert.Equal(t, len(body), len(hdr.MiniBlockHeaders)) } @@ -2420,7 +2335,7 @@ func TestShardProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { tdp, &mock.AddressConverterMock{}, initAccountsMock(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -2433,10 +2348,12 @@ func TestShardProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { container, _ := factory.Create() tc, err := coordinator.NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(3), initAccountsMock(), tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, container, &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -2526,7 +2443,7 @@ func TestShardProcessor_MarshalizedDataMarshalWithoutSuccess(t *testing.T) { tdp, &mock.AddressConverterMock{}, initAccountsMock(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -2539,10 +2456,12 @@ func TestShardProcessor_MarshalizedDataMarshalWithoutSuccess(t *testing.T) { container, _ := factory.Create() tc, err := coordinator.NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(3), initAccountsMock(), tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, container, &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -2594,7 +2513,7 @@ func TestShardProcessor_ReceivedMetaBlockShouldRequestMissingMiniBlocks(t *testi //put this metaBlock inside datapool metaBlockHash := []byte("metablock hash") - datapool.MetaBlocks().Put(metaBlockHash, metaBlock) + datapool.Headers().AddHeader(metaBlockHash, metaBlock) //put the existing miniblock inside datapool datapool.MiniBlocks().Put(miniBlockHash1, &block.MiniBlock{}) @@ -2602,19 +2521,23 @@ func TestShardProcessor_ReceivedMetaBlockShouldRequestMissingMiniBlocks(t *testi miniBlockHash2Requested := int32(0) miniBlockHash3Requested := int32(0) - requestHandler := &mock.RequestHandlerMock{RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { - if bytes.Equal(miniBlockHash1, miniblockHash) { - 
atomic.AddInt32(&miniBlockHash1Requested, 1) - } - if bytes.Equal(miniBlockHash2, miniblockHash) { - atomic.AddInt32(&miniBlockHash2Requested, 1) - } - if bytes.Equal(miniBlockHash3, miniblockHash) { - atomic.AddInt32(&miniBlockHash3Requested, 1) - } - }} + requestHandler := &mock.RequestHandlerStub{ + RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { + if bytes.Equal(miniBlockHash1, miniblockHash) { + atomic.AddInt32(&miniBlockHash1Requested, 1) + } + if bytes.Equal(miniBlockHash2, miniblockHash) { + atomic.AddInt32(&miniBlockHash2Requested, 1) + } + if bytes.Equal(miniBlockHash3, miniblockHash) { + atomic.AddInt32(&miniBlockHash3Requested, 1) + } + }, + } tc, _ := coordinator.NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(3), initAccountsMock(), datapool.MiniBlocks(), @@ -2632,7 +2555,7 @@ func TestShardProcessor_ReceivedMetaBlockShouldRequestMissingMiniBlocks(t *testi arguments.TxCoordinator = tc bp, _ := blproc.NewShardProcessor(arguments) - bp.ReceivedMetaBlock(metaBlockHash) + bp.ReceivedMetaBlock(metaBlock, metaBlockHash) //we have to wait to be sure txHash1Requested is not incremented by a late call time.Sleep(time.Second) @@ -2674,17 +2597,21 @@ func TestShardProcessor_ReceivedMetaBlockNoMissingMiniBlocksShouldPass(t *testin //put this metaBlock inside datapool metaBlockHash := []byte("metablock hash") - datapool.MetaBlocks().Put(metaBlockHash, &metaBlock) + datapool.Headers().AddHeader(metaBlockHash, metaBlock) //put the existing miniblock inside datapool datapool.MiniBlocks().Put(miniBlockHash1, &block.MiniBlock{}) noOfMissingMiniBlocks := int32(0) - requestHandler := &mock.RequestHandlerMock{RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { - atomic.AddInt32(&noOfMissingMiniBlocks, 1) - }} + requestHandler := &mock.RequestHandlerStub{ + RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { + atomic.AddInt32(&noOfMissingMiniBlocks, 1) + }, + } tc, _ := coordinator.NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(3), initAccountsMock(), datapool.MiniBlocks(), @@ -2702,7 +2629,7 @@ func TestShardProcessor_ReceivedMetaBlockNoMissingMiniBlocksShouldPass(t *testin arguments.TxCoordinator = tc sp, _ := blproc.NewShardProcessor(arguments) - sp.ReceivedMetaBlock(metaBlockHash) + sp.ReceivedMetaBlock(metaBlock, metaBlockHash) assert.Equal(t, int32(0), atomic.LoadInt32(&noOfMissingMiniBlocks)) } @@ -2759,7 +2686,7 @@ func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMe(t *testing.T) { metaBytes, _ := marshalizer.Marshal(meta) metaHash := hasher.Compute(string(metaBytes)) - tdp.MetaBlocks().Put(metaHash, meta) + tdp.Headers().AddHeader(metaHash, meta) haveTimeTrue := func() bool { return true @@ -2768,34 +2695,13 @@ func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMe(t *testing.T) { arguments := CreateMockArgumentsMultiShard() arguments.DataPool = tdp sp, _ := blproc.NewShardProcessor(arguments) - miniBlockSlice, usedMetaHdrsHashes, noOfTxs, err := sp.CreateAndProcessCrossMiniBlocksDstMe(2, 2, haveTimeTrue) + miniBlockSlice, usedMetaHdrsHashes, noOfTxs, err := sp.CreateAndProcessCrossMiniBlocksDstMe(2, haveTimeTrue) assert.Equal(t, err == nil, true) assert.Equal(t, len(miniBlockSlice) == 0, true) assert.Equal(t, usedMetaHdrsHashes, uint32(0)) assert.Equal(t, noOfTxs, uint32(0)) } -func 
TestShardProcessor_NewShardProcessorWrongTypeOfStartHeaderShouldErrWrongTypeAssertion(t *testing.T) { - t.Parallel() - - tdp := mock.NewPoolsHolderMock() - txHash := []byte(nil) - tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) - - startHeaders := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) - - startHeaders[sharding.MetachainShardId] = &block.Header{} - - arguments := CreateMockArgumentsMultiShard() - arguments.DataPool = tdp - arguments.StartHeaders = startHeaders - - sp, err := blproc.NewShardProcessor(arguments) - - assert.Nil(t, sp) - assert.Equal(t, process.ErrWrongTypeAssertion, err) -} - func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMeProcessPartOfMiniBlocksInMetaBlock(t *testing.T) { t.Parallel() @@ -2841,10 +2747,7 @@ func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMeProcessPartOfMiniBlo } mb1Hash := []byte("meta block 1") - tdp.MetaBlocks().Put( - mb1Hash, - meta, - ) + tdp.Headers().AddHeader(mb1Hash, meta) meta = &block.MetaBlock{ Nonce: 2, @@ -2853,10 +2756,7 @@ func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMeProcessPartOfMiniBlo } mb2Hash := []byte("meta block 2") - tdp.MetaBlocks().Put( - mb2Hash, - meta, - ) + tdp.Headers().AddHeader(mb2Hash, meta) meta = &block.MetaBlock{ Nonce: 3, @@ -2866,16 +2766,13 @@ func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMeProcessPartOfMiniBlo } mb3Hash := []byte("meta block 3") - tdp.MetaBlocks().Put( - mb3Hash, - meta, - ) + tdp.Headers().AddHeader(mb3Hash, meta) arguments := CreateMockArgumentsMultiShard() arguments.DataPool = tdp sp, _ := blproc.NewShardProcessor(arguments) - miniBlocksReturned, usedMetaHdrsHashes, nrTxAdded, err := sp.CreateAndProcessCrossMiniBlocksDstMe(2, 2, haveTimeTrue) + miniBlocksReturned, usedMetaHdrsHashes, nrTxAdded, err := sp.CreateAndProcessCrossMiniBlocksDstMe(2, haveTimeTrue) assert.Equal(t, 0, len(miniBlocksReturned)) assert.Equal(t, uint32(0), usedMetaHdrsHashes) @@ -2909,15 +2806,15 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId) datapool.Transactions().AddData(txHash1, &transaction.Transaction{ Nonce: tx1Nonce, - Data: string(txHash1), + Data: txHash1, }, cacheId) datapool.Transactions().AddData(txHash2, &transaction.Transaction{ Nonce: tx2Nonce, - Data: string(txHash2), + Data: txHash2, }, cacheId) datapool.Transactions().AddData(txHash3, &transaction.Transaction{ Nonce: tx3Nonce, - Data: string(txHash3), + Data: txHash3, }, cacheId) tx1ExecutionResult := uint64(0) @@ -2927,13 +2824,13 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T txProcessorMock := &mock.TxProcessorMock{ ProcessTransactionCalled: func(transaction *transaction.Transaction) error { //execution, in this context, means moving the tx nonce to itx corresponding execution result variable - if transaction.Data == string(txHash1) { + if bytes.Equal(transaction.Data, txHash1) { tx1ExecutionResult = transaction.Nonce } - if transaction.Data == string(txHash2) { + if bytes.Equal(transaction.Data, txHash2) { tx2ExecutionResult = transaction.Nonce } - if transaction.Data == string(txHash3) { + if bytes.Equal(transaction.Data, txHash3) { tx3ExecutionResult = transaction.Nonce } @@ -2960,7 +2857,7 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T datapool, &mock.AddressConverterMock{}, accntAdapter, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, 
txProcessorMock, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -2998,10 +2895,12 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T container, _ := factory.Create() tc, err := coordinator.NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(3), accntAdapter, datapool.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, container, &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -3015,7 +2914,7 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T arguments.TxCoordinator = tc bp, _ := blproc.NewShardProcessor(arguments) - blockBody, err := bp.CreateMiniBlocks(15000, 0, func() bool { return true }) + blockBody, err := bp.CreateMiniBlocks(15000, func() bool { return true }) assert.Nil(t, err) //testing execution @@ -3056,22 +2955,15 @@ func TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { //put 3 metablocks in pool metaBlockHash1 := []byte("meta block 1") metaBlock1 := createDummyMetaBlock(destShardId, destShards[0], miniblockHashes[0], miniblockHashes[1]) - datapool.MetaBlocks().Put( - metaBlockHash1, - metaBlock1, - ) + datapool.Headers().AddHeader(metaBlockHash1, metaBlock1) + metaBlockHash2 := []byte("meta block 2") metaBlock2 := createDummyMetaBlock(destShardId, destShards[1], miniblockHashes[2], miniblockHashes[3]) - datapool.MetaBlocks().Put( - metaBlockHash2, - metaBlock2, - ) + datapool.Headers().AddHeader(metaBlockHash2, metaBlock2) + metaBlockHash3 := []byte("meta block 3") metaBlock3 := createDummyMetaBlock(destShardId, destShards[2], miniblockHashes[4], miniblockHashes[5]) - datapool.MetaBlocks().Put( - metaBlockHash3, - metaBlock3, - ) + datapool.Headers().AddHeader(metaBlockHash3, metaBlock3) shardCoordinator := mock.NewMultipleShardsCoordinatorMock() shardCoordinator.CurrentShard = destShardId @@ -3087,7 +2979,6 @@ func TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { return 0 }, } - arguments.StartHeaders = createGenesisBlocks(shardCoordinator) bp, _ := blproc.NewShardProcessor(arguments) bp.SetHdrForCurrentBlock(metaBlockHash1, metaBlock1, true) @@ -3112,12 +3003,6 @@ func TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { err := bp.AddProcessedCrossMiniBlocksFromHeader(blockHeader) assert.Nil(t, err) - //check WasMiniBlockProcessed for remaining metablocks - assert.True(t, bp.IsMiniBlockProcessed(metaBlockHash2, miniblockHashes[2])) - assert.False(t, bp.IsMiniBlockProcessed(metaBlockHash2, miniblockHashes[3])) - - assert.False(t, bp.IsMiniBlockProcessed(metaBlockHash3, miniblockHashes[4])) - assert.False(t, bp.IsMiniBlockProcessed(metaBlockHash3, miniblockHashes[5])) } func TestBlockProcessor_RestoreBlockIntoPoolsShouldErrNilBlockHeader(t *testing.T) { @@ -3177,7 +3062,7 @@ func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { datapool, &mock.AddressConverterMock{}, initAccountsMock(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -3190,10 +3075,12 @@ func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { container, _ := factory.Create() tc, err := coordinator.NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(3), initAccountsMock(), datapool.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, 
container, &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -3223,10 +3110,7 @@ func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { metablockHash := []byte("meta block hash 1") metablockHeader := createDummyMetaBlock(0, 1, miniblockHash) - datapool.MetaBlocks().Put( - metablockHash, - metablockHeader, - ) + datapool.Headers().AddHeader(metablockHash, metablockHeader) store.GetStorerCalled = func(unitType dataRetriever.UnitType) storage.Storer { return &mock.StorerStub{ @@ -3255,7 +3139,6 @@ func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { assert.Nil(t, err) assert.Equal(t, &miniblock, miniblockFromPool) assert.Equal(t, tx, txFromPool) - assert.Equal(t, false, sp.IsMiniBlockProcessed(metablockHash, miniblockHash)) } func TestShardProcessor_DecodeBlockBody(t *testing.T) { @@ -3320,7 +3203,6 @@ func TestShardProcessor_IsHdrConstructionValid(t *testing.T) { arguments.Hasher = hasher arguments.Marshalizer = marshalizer arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(shardNr) - arguments.StartHeaders = createGenesisBlocks(arguments.ShardCoordinator) sp, _ := blproc.NewShardProcessor(arguments) prevRandSeed := []byte("prevrand") @@ -3362,17 +3244,6 @@ func TestShardProcessor_IsHdrConstructionValid(t *testing.T) { err = sp.IsHdrConstructionValid(currHdr, prevHdr) assert.Equal(t, err, process.ErrWrongNonceInBlock) - currHdr.Nonce = 0 - prevHdr.Nonce = 0 - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrRootStateDoesNotMatch) - - currHdr.Nonce = 0 - prevHdr.Nonce = 0 - prevHdr.RootHash = nil - err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Nil(t, err) - currHdr.Nonce = 46 prevHdr.Nonce = 45 prevHdr.Round = currHdr.Round + 1 @@ -3429,18 +3300,19 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { arguments.Marshalizer = marshalizer arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(shardNr) arguments.ForkDetector = forkDetector - arguments.StartHeaders = createGenesisBlocks(arguments.ShardCoordinator) + startHeaders := createGenesisBlocks(arguments.ShardCoordinator) + arguments.BlockTracker = mock.NewBlockTrackerMock(arguments.ShardCoordinator, startHeaders) sp, _ := blproc.NewShardProcessor(arguments) prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") - notarizedHdrs := sp.NotarizedHdrs() firstNonce := uint64(44) lastHdr := &block.MetaBlock{Round: 9, Nonce: firstNonce, RandSeed: prevRandSeed} - notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) + + arguments.BlockTracker.AddCrossNotarizedHeader(sharding.MetachainShardId, lastHdr, nil) //header shard 0 prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) @@ -3478,12 +3350,11 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { assert.Nil(t, err) assert.Equal(t, 0, putCalledNr) - notarizedHdrs = sp.NotarizedHdrs() assert.Equal(t, firstNonce, sp.LastNotarizedHdrForShard(sharding.MetachainShardId).GetNonce()) assert.Equal(t, 0, len(processedMetaHdrs)) // wrong header type in pool and defer called - datapool.MetaBlocks().Put(currHash, shardHdr) + datapool.Headers().AddHeader(currHash, shardHdr) sp.SetHdrForCurrentBlock(currHash, shardHdr, true) hashes := make([][]byte, 0) @@ -3500,12 +3371,11 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { assert.Nil(t, err) assert.Equal(t, 0, putCalledNr) - 
notarizedHdrs = sp.NotarizedHdrs() assert.Equal(t, firstNonce, sp.LastNotarizedHdrForShard(sharding.MetachainShardId).GetNonce()) // put headers in pool - datapool.MetaBlocks().Put(currHash, currHdr) - datapool.MetaBlocks().Put(prevHash, prevHdr) + datapool.Headers().AddHeader(currHash, currHdr) + datapool.Headers().AddHeader(prevHash, prevHdr) sp.CreateBlockStarted() sp.SetHdrForCurrentBlock(currHash, currHdr, true) @@ -3582,7 +3452,8 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNotAllMBFinished(t *tes arguments.Marshalizer = marshalizer arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(shardNr) arguments.ForkDetector = forkDetector - arguments.StartHeaders = createGenesisBlocks(arguments.ShardCoordinator) + startHeaders := createGenesisBlocks(arguments.ShardCoordinator) + arguments.BlockTracker = mock.NewBlockTrackerMock(arguments.ShardCoordinator, startHeaders) sp, _ := blproc.NewShardProcessor(arguments) prevRandSeed := []byte("prevrand") @@ -3660,8 +3531,8 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNotAllMBFinished(t *tes prevHash, _ = sp.ComputeHeaderHash(prevHdr) // put headers in pool - datapool.MetaBlocks().Put(currHash, currHdr) - datapool.MetaBlocks().Put(prevHash, prevHdr) + datapool.Headers().AddHeader(currHash, currHdr) + datapool.Headers().AddHeader(prevHash, prevHdr) sp.SetHdrForCurrentBlock(currHash, currHdr, true) sp.SetHdrForCurrentBlock(prevHash, prevHdr, true) @@ -3712,7 +3583,8 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrAllMBFinished(t *testin arguments.Marshalizer = marshalizer arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(shardNr) arguments.ForkDetector = forkDetector - arguments.StartHeaders = createGenesisBlocks(arguments.ShardCoordinator) + startHeaders := createGenesisBlocks(arguments.ShardCoordinator) + arguments.BlockTracker = mock.NewBlockTrackerMock(arguments.ShardCoordinator, startHeaders) sp, _ := blproc.NewShardProcessor(arguments) prevRandSeed := []byte("prevrand") @@ -3795,9 +3667,9 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrAllMBFinished(t *testin prevHash, _ = sp.ComputeHeaderHash(prevHdr) // put headers in pool - datapool.MetaBlocks().Put(currHash, currHdr) - datapool.MetaBlocks().Put(prevHash, prevHdr) - datapool.MetaBlocks().Put([]byte("shouldNotRemove"), &block.MetaBlock{ + datapool.Headers().AddHeader(currHash, currHdr) + datapool.Headers().AddHeader(prevHash, prevHdr) + datapool.Headers().AddHeader([]byte("shouldNotRemove"), &block.MetaBlock{ Round: 12, PrevRandSeed: []byte("nextrand"), PrevHash: currHash, @@ -3968,14 +3840,14 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolShouldPass(t *testing.T) { metablockHashes := make([][]byte, 0) metablockHashes = append(metablockHashes, metaHash) - metaBlockRestored, ok := poolFake.MetaBlocks().Get(metaHash) + metaBlockRestored, err := poolFake.Headers().GetHeaderByHash(metaHash) assert.Equal(t, nil, metaBlockRestored) - assert.False(t, ok) + assert.Error(t, err) - err := sp.RestoreMetaBlockIntoPool(miniblockHashes, metablockHashes) + err = sp.RestoreMetaBlockIntoPool(miniblockHashes, metablockHashes) - metaBlockRestored, _ = poolFake.MetaBlocks().Get(metaHash) + metaBlockRestored, _ = poolFake.Headers().GetHeaderByHash(metaHash) assert.Equal(t, &metaBlock, metaBlockRestored) assert.Nil(t, err) @@ -4065,13 +3937,13 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithoutOwnHd arguments.Store = store arguments.Hasher = hasher arguments.Marshalizer = marshalizer - 
arguments.StartHeaders = genesisBlocks + arguments.BlockTracker = &mock.BlockTrackerMock{} sp, _ := blproc.NewShardProcessor(arguments) shardInfo := make([]block.ShardData, 0) shardInfo = append(shardInfo, block.ShardData{HeaderHash: []byte("hash"), ShardID: 1}) - _ = datapool.Headers().Put([]byte("hash"), &block.Header{ShardId: 0, Nonce: 1}) + datapool.Headers().AddHeader([]byte("hash"), &block.Header{ShardId: 0, Nonce: 1}) prevMetaHdr := genesisBlocks[sharding.MetachainShardId] prevHash, _ := core.CalculateHash(marshalizer, hasher, prevMetaHdr) @@ -4085,7 +3957,7 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithoutOwnHd ShardInfo: shardInfo, } currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = datapool.MetaBlocks().Put(currHash, currMetaHdr) + datapool.Headers().AddHeader(currHash, currMetaHdr) processedHdrs = append(processedHdrs, currMetaHdr) prevMetaHdr = currMetaHdr @@ -4100,7 +3972,7 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithoutOwnHd ShardInfo: shardInfo, } currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = datapool.MetaBlocks().Put(currHash, currMetaHdr) + datapool.Headers().AddHeader(currHash, currMetaHdr) processedHdrs = append(processedHdrs, currMetaHdr) hdrs, _, _ := sp.GetHighestHdrForOwnShardFromMetachain(processedHdrs) @@ -4124,7 +3996,7 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrBu arguments.Store = store arguments.Hasher = hasher arguments.Marshalizer = marshalizer - arguments.StartHeaders = genesisBlocks + arguments.BlockTracker = &mock.BlockTrackerMock{} sp, _ := blproc.NewShardProcessor(arguments) @@ -4143,7 +4015,7 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrBu ShardInfo: shardInfo, } currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = datapool.MetaBlocks().Put(currHash, currMetaHdr) + datapool.Headers().AddHeader(currHash, currMetaHdr) processedHdrs = append(processedHdrs, currMetaHdr) prevMetaHdr = currMetaHdr @@ -4158,7 +4030,7 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrBu ShardInfo: shardInfo, } currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = datapool.MetaBlocks().Put(currHash, currMetaHdr) + datapool.Headers().AddHeader(currHash, currMetaHdr) processedHdrs = append(processedHdrs, currMetaHdr) hdrs, _, _ := sp.GetHighestHdrForOwnShardFromMetachain(processedHdrs) @@ -4181,7 +4053,7 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrSt arguments.Store = store arguments.Hasher = hasher arguments.Marshalizer = marshalizer - arguments.StartHeaders = genesisBlocks + arguments.BlockTracker = &mock.BlockTrackerMock{} sp, _ := blproc.NewShardProcessor(arguments) @@ -4190,7 +4062,7 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrSt Round: 1, } ownHash, _ := core.CalculateHash(marshalizer, hasher, ownHdr) - _ = datapool.Headers().Put(ownHash, ownHdr) + datapool.Headers().AddHeader(ownHash, ownHdr) shardInfo := make([]block.ShardData, 0) shardInfo = append(shardInfo, block.ShardData{HeaderHash: ownHash, ShardID: 0}) @@ -4207,7 +4079,7 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrSt ShardInfo: shardInfo, } currHash, _ := core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = datapool.MetaBlocks().Put(currHash, currMetaHdr) + datapool.Headers().AddHeader(currHash, currMetaHdr) ownHdr = &block.Header{ Nonce: 2, @@ 
-4232,7 +4104,7 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrSt ShardInfo: shardInfo, } currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = datapool.MetaBlocks().Put(currHash, currMetaHdr) + datapool.Headers().AddHeader(currHash, currMetaHdr) processedHdrs = append(processedHdrs, currMetaHdr) prevMetaHdr = currMetaHdr @@ -4246,7 +4118,7 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachaiMetaHdrsWithOwnHdrSt RandSeed: prevMetaHdr.GetRandSeed(), } currHash, _ = core.CalculateHash(marshalizer, hasher, currMetaHdr) - _ = datapool.MetaBlocks().Put(currHash, currMetaHdr) + datapool.Headers().AddHeader(currHash, currMetaHdr) processedHdrs = append(processedHdrs, currMetaHdr) hdrs, _, _ := sp.GetHighestHdrForOwnShardFromMetachain(processedHdrs) @@ -4268,8 +4140,7 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { arguments.DataPool = poolMock arguments.Store = storer arguments.ShardCoordinator = shardC - arguments.StartHeaders = createGenesisBlocks(shardC) - arguments.Rounder = &mock.RounderMock{} + arguments.BlockTracker = &mock.BlockTrackerMock{} sp, _ := blproc.NewShardProcessor(arguments) miniblockHashes := make(map[string]uint32, 0) @@ -4300,14 +4171,13 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { return []byte("cool") } metaHash := hasher.Compute(string(metaBytes)) - sp.AddProcessedMiniBlock(metaHash, testMBHash) metablockHashes := make([][]byte, 0) metablockHashes = append(metablockHashes, metaHash) - metaBlockRestored, ok := poolMock.MetaBlocks().Get(metaHash) + metaBlockRestored, err := poolMock.Headers().GetHeaderByHash(metaHash) assert.Equal(t, nil, metaBlockRestored) - assert.False(t, ok) + assert.Error(t, err) storer.GetCalled = func(unitType dataRetriever.UnitType, key []byte) ([]byte, error) { return metaBytes, nil @@ -4323,11 +4193,10 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { } } - err := sp.RestoreMetaBlockIntoPool(miniblockHashes, metablockHashes) + err = sp.RestoreMetaBlockIntoPool(miniblockHashes, metablockHashes) - metaBlockRestored, _ = poolMock.MetaBlocks().Get(metaHash) + metaBlockRestored, _ = poolMock.Headers().GetHeaderByHash(metaHash) assert.Equal(t, meta, metaBlockRestored) assert.Nil(t, err) - assert.True(t, sp.IsMiniBlockProcessed(metaHash, testMBHash)) } diff --git a/process/common.go b/process/common.go index b27649b67de..dc37ef53003 100644 --- a/process/common.go +++ b/process/common.go @@ -4,6 +4,7 @@ import ( "math" "sort" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/transaction" @@ -11,9 +12,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-go/storage" ) var log = logger.GetOrCreate("process") @@ -34,17 +33,17 @@ func EmptyChannel(ch chan bool) int { // GetShardHeader gets the header, which is associated with the given hash, from pool or storage func GetShardHeader( hash []byte, - cacher storage.Cacher, + headersCacher dataRetriever.HeadersPool, marshalizer marshal.Marshalizer, storageService dataRetriever.StorageService, ) (*block.Header, error) { - err := checkGetHeaderParamsForNil(cacher, 
marshalizer, storageService) + err := checkGetHeaderParamsForNil(headersCacher, marshalizer, storageService) if err != nil { return nil, err } - hdr, err := GetShardHeaderFromPool(hash, cacher) + hdr, err := GetShardHeaderFromPool(hash, headersCacher) if err != nil { hdr, err = GetShardHeaderFromStorage(hash, marshalizer, storageService) if err != nil { @@ -58,17 +57,17 @@ func GetShardHeader( // GetMetaHeader gets the header, which is associated with the given hash, from pool or storage func GetMetaHeader( hash []byte, - cacher storage.Cacher, + headersCacher dataRetriever.HeadersPool, marshalizer marshal.Marshalizer, storageService dataRetriever.StorageService, ) (*block.MetaBlock, error) { - err := checkGetHeaderParamsForNil(cacher, marshalizer, storageService) + err := checkGetHeaderParamsForNil(headersCacher, marshalizer, storageService) if err != nil { return nil, err } - hdr, err := GetMetaHeaderFromPool(hash, cacher) + hdr, err := GetMetaHeaderFromPool(hash, headersCacher) if err != nil { hdr, err = GetMetaHeaderFromStorage(hash, marshalizer, storageService) if err != nil { @@ -82,10 +81,10 @@ func GetMetaHeader( // GetShardHeaderFromPool gets the header, which is associated with the given hash, from pool func GetShardHeaderFromPool( hash []byte, - cacher storage.Cacher, + headersCacher dataRetriever.HeadersPool, ) (*block.Header, error) { - obj, err := getHeaderFromPool(hash, cacher) + obj, err := getHeaderFromPool(hash, headersCacher) if err != nil { return nil, err } @@ -101,10 +100,10 @@ func GetShardHeaderFromPool( // GetMetaHeaderFromPool gets the header, which is associated with the given hash, from pool func GetMetaHeaderFromPool( hash []byte, - cacher storage.Cacher, + headersCacher dataRetriever.HeadersPool, ) (*block.MetaBlock, error) { - obj, err := getHeaderFromPool(hash, cacher) + obj, err := getHeaderFromPool(hash, headersCacher) if err != nil { return nil, err } @@ -191,19 +190,18 @@ func GetMarshalizedHeaderFromStorage( func GetShardHeaderWithNonce( nonce uint64, shardId uint32, - cacher storage.Cacher, - uint64SyncMapCacher dataRetriever.Uint64SyncMapCacher, + headersCacher dataRetriever.HeadersPool, marshalizer marshal.Marshalizer, storageService dataRetriever.StorageService, uint64Converter typeConverters.Uint64ByteSliceConverter, ) (*block.Header, []byte, error) { - err := checkGetHeaderWithNonceParamsForNil(cacher, uint64SyncMapCacher, marshalizer, storageService, uint64Converter) + err := checkGetHeaderWithNonceParamsForNil(headersCacher, marshalizer, storageService, uint64Converter) if err != nil { return nil, nil, err } - hdr, hash, err := GetShardHeaderFromPoolWithNonce(nonce, shardId, cacher, uint64SyncMapCacher) + hdr, hash, err := GetShardHeaderFromPoolWithNonce(nonce, shardId, headersCacher) if err != nil { hdr, hash, err = GetShardHeaderFromStorageWithNonce(nonce, shardId, storageService, uint64Converter, marshalizer) if err != nil { @@ -217,19 +215,18 @@ func GetShardHeaderWithNonce( // GetMetaHeaderWithNonce method returns a meta block header with a given nonce func GetMetaHeaderWithNonce( nonce uint64, - cacher storage.Cacher, - uint64SyncMapCacher dataRetriever.Uint64SyncMapCacher, + headersCacher dataRetriever.HeadersPool, marshalizer marshal.Marshalizer, storageService dataRetriever.StorageService, uint64Converter typeConverters.Uint64ByteSliceConverter, ) (*block.MetaBlock, []byte, error) { - err := checkGetHeaderWithNonceParamsForNil(cacher, uint64SyncMapCacher, marshalizer, storageService, uint64Converter) + err := 
checkGetHeaderWithNonceParamsForNil(headersCacher, marshalizer, storageService, uint64Converter) if err != nil { return nil, nil, err } - hdr, hash, err := GetMetaHeaderFromPoolWithNonce(nonce, cacher, uint64SyncMapCacher) + hdr, hash, err := GetMetaHeaderFromPoolWithNonce(nonce, headersCacher) if err != nil { hdr, hash, err = GetMetaHeaderFromStorageWithNonce(nonce, storageService, uint64Converter, marshalizer) if err != nil { @@ -244,11 +241,10 @@ func GetMetaHeaderWithNonce( func GetShardHeaderFromPoolWithNonce( nonce uint64, shardId uint32, - cacher storage.Cacher, - uint64SyncMapCacher dataRetriever.Uint64SyncMapCacher, + headersCacher dataRetriever.HeadersPool, ) (*block.Header, []byte, error) { - obj, hash, err := getHeaderFromPoolWithNonce(nonce, shardId, cacher, uint64SyncMapCacher) + obj, hash, err := getHeaderFromPoolWithNonce(nonce, shardId, headersCacher) if err != nil { return nil, nil, err } @@ -264,11 +260,10 @@ func GetShardHeaderFromPoolWithNonce( // GetMetaHeaderFromPoolWithNonce method returns a meta block header from pool with a given nonce func GetMetaHeaderFromPoolWithNonce( nonce uint64, - cacher storage.Cacher, - uint64SyncMapCacher dataRetriever.Uint64SyncMapCacher, + headersCacher dataRetriever.HeadersPool, ) (*block.MetaBlock, []byte, error) { - obj, hash, err := getHeaderFromPoolWithNonce(nonce, sharding.MetachainShardId, cacher, uint64SyncMapCacher) + obj, hash, err := getHeaderFromPoolWithNonce(nonce, sharding.MetachainShardId, headersCacher) if err != nil { return nil, nil, err } @@ -357,6 +352,7 @@ func GetTransactionHandler( shardedDataCacherNotifier dataRetriever.ShardedDataCacherNotifier, storageService dataRetriever.StorageService, marshalizer marshal.Marshalizer, + searchFirst bool, ) (data.TransactionHandler, error) { err := checkGetTransactionParamsForNil(shardedDataCacherNotifier, storageService, marshalizer) @@ -364,7 +360,7 @@ func GetTransactionHandler( return nil, err } - tx, err := GetTransactionHandlerFromPool(senderShardID, destShardID, txHash, shardedDataCacherNotifier) + tx, err := GetTransactionHandlerFromPool(senderShardID, destShardID, txHash, shardedDataCacherNotifier, searchFirst) if err != nil { tx, err = GetTransactionHandlerFromStorage(txHash, storageService, marshalizer) if err != nil { @@ -381,19 +377,30 @@ func GetTransactionHandlerFromPool( destShardID uint32, txHash []byte, shardedDataCacherNotifier dataRetriever.ShardedDataCacherNotifier, + searchFirst bool, ) (data.TransactionHandler, error) { if shardedDataCacherNotifier == nil { return nil, ErrNilShardedDataCacherNotifier } - strCache := ShardCacherIdentifier(senderShardID, destShardID) - txStore := shardedDataCacherNotifier.ShardDataStore(strCache) - if txStore == nil { - return nil, ErrNilStorage + var val interface{} + ok := false + if searchFirst { + val, ok = shardedDataCacherNotifier.SearchFirstData(txHash) + if !ok { + return nil, ErrTxNotFound + } + } else { + strCache := ShardCacherIdentifier(senderShardID, destShardID) + txStore := shardedDataCacherNotifier.ShardDataStore(strCache) + if txStore == nil { + return nil, ErrNilStorage + } + + val, ok = txStore.Peek(txHash) } - val, ok := txStore.Peek(txHash) if !ok { return nil, ErrTxNotFound } @@ -435,7 +442,7 @@ func GetTransactionHandlerFromStorage( } func checkGetHeaderParamsForNil( - cacher storage.Cacher, + cacher dataRetriever.HeadersPool, marshalizer marshal.Marshalizer, storageService dataRetriever.StorageService, ) error { @@ -454,21 +461,17 @@ func checkGetHeaderParamsForNil( } func 
checkGetHeaderWithNonceParamsForNil( - cacher storage.Cacher, - uint64SyncMapCacher dataRetriever.Uint64SyncMapCacher, + headersCacher dataRetriever.HeadersPool, marshalizer marshal.Marshalizer, storageService dataRetriever.StorageService, uint64Converter typeConverters.Uint64ByteSliceConverter, ) error { - err := checkGetHeaderParamsForNil(cacher, marshalizer, storageService) + err := checkGetHeaderParamsForNil(headersCacher, marshalizer, storageService) if err != nil { return err } - if uint64SyncMapCacher == nil || uint64SyncMapCacher.IsInterfaceNil() { - return ErrNilUint64SyncMapCacher - } - if uint64Converter == nil || uint64Converter.IsInterfaceNil() { + if check.IfNil(uint64Converter) { return ErrNilUint64Converter } @@ -496,15 +499,15 @@ func checkGetTransactionParamsForNil( func getHeaderFromPool( hash []byte, - cacher storage.Cacher, + headersCacher dataRetriever.HeadersPool, ) (interface{}, error) { - if cacher == nil || cacher.IsInterfaceNil() { + if check.IfNil(headersCacher) { return nil, ErrNilCacher } - obj, ok := cacher.Peek(hash) - if !ok { + obj, err := headersCacher.GetHeaderByHash(hash) + if err != nil { return nil, ErrMissingHeader } @@ -514,33 +517,20 @@ func getHeaderFromPool( func getHeaderFromPoolWithNonce( nonce uint64, shardId uint32, - cacher storage.Cacher, - uint64SyncMapCacher dataRetriever.Uint64SyncMapCacher, + headersCacher dataRetriever.HeadersPool, ) (interface{}, []byte, error) { - if cacher == nil || cacher.IsInterfaceNil() { + if check.IfNil(headersCacher) { return nil, nil, ErrNilCacher } - if uint64SyncMapCacher == nil || uint64SyncMapCacher.IsInterfaceNil() { - return nil, nil, ErrNilUint64SyncMapCacher - } - - syncMap, ok := uint64SyncMapCacher.Get(nonce) - if !ok { - return nil, nil, ErrMissingHashForHeaderNonce - } - - hash, ok := syncMap.Load(shardId) - if hash == nil || !ok { - return nil, nil, ErrMissingHashForHeaderNonce - } - obj, ok := cacher.Peek(hash) - if !ok { + headers, hashes, err := headersCacher.GetHeadersByNonceAndShardId(nonce, shardId) + if err != nil { return nil, nil, ErrMissingHeader } - return obj, hash, nil + //TODO what should we do when we get from pool more than one header with same nonce and shardId + return headers[len(headers)-1], hashes[len(hashes)-1], nil } func getHeaderHashFromStorageWithNonce( @@ -613,36 +603,3 @@ type ForkInfo struct { func NewForkInfo() *ForkInfo { return &ForkInfo{IsDetected: false, Nonce: math.MaxUint64, Round: math.MaxUint64, Hash: nil} } - -// ConvertProcessedMiniBlocksMapToSlice will convert a map[string]map[string]struct{} in a slice of MiniBlocksInMeta -func ConvertProcessedMiniBlocksMapToSlice(processedMiniBlocks map[string]map[string]struct{}) []bootstrapStorage.MiniBlocksInMeta { - miniBlocksInMetaBlocks := make([]bootstrapStorage.MiniBlocksInMeta, 0, len(processedMiniBlocks)) - - for metaHash, miniBlocksHashes := range processedMiniBlocks { - miniBlocksInMeta := bootstrapStorage.MiniBlocksInMeta{ - MetaHash: []byte(metaHash), - MiniBlocksHashes: make([][]byte, 0, len(miniBlocksHashes)), - } - for miniBlockHash := range miniBlocksHashes { - miniBlocksInMeta.MiniBlocksHashes = append(miniBlocksInMeta.MiniBlocksHashes, []byte(miniBlockHash)) - } - miniBlocksInMetaBlocks = append(miniBlocksInMetaBlocks, miniBlocksInMeta) - } - - return miniBlocksInMetaBlocks -} - -// ConvertSliceToProcessedMiniBlocksMap will convert a slice of MiniBlocksInMeta in an map[string]map[string]struct{} -func ConvertSliceToProcessedMiniBlocksMap(miniBlocksInMetaBlocks []bootstrapStorage.MiniBlocksInMeta) 
map[string]map[string]struct{} { - processedMiniBlocks := make(map[string]map[string]struct{}, len(miniBlocksInMetaBlocks)) - - for _, miniBlocksInMeta := range miniBlocksInMetaBlocks { - miniBlocksHashes := make(map[string]struct{}, len(miniBlocksInMeta.MiniBlocksHashes)) - for _, miniBlockHash := range miniBlocksInMeta.MiniBlocksHashes { - miniBlocksHashes[string(miniBlockHash)] = struct{}{} - } - processedMiniBlocks[string(miniBlocksInMeta.MetaHash)] = miniBlocksHashes - } - - return processedMiniBlocks -} diff --git a/process/common_test.go b/process/common_test.go index 63e8da3b6fe..544a6747cea 100644 --- a/process/common_test.go +++ b/process/common_test.go @@ -48,7 +48,7 @@ func TestEmptyChannelShouldWorkOnNotBufferedChannel(t *testing.T) { wg := sync.WaitGroup{} wgChanWasWritten := sync.WaitGroup{} - numConcurrentWrites := 100 + numConcurrentWrites := 50 wg.Add(numConcurrentWrites) wgChanWasWritten.Add(numConcurrentWrites) for i := 0; i < numConcurrentWrites; i++ { @@ -90,7 +90,7 @@ func TestGetShardHeaderShouldErrNilCacher(t *testing.T) { func TestGetShardHeaderShouldErrNilMarshalizer(t *testing.T) { hash := []byte("X") - cacher := &mock.CacherStub{} + cacher := &mock.HeadersCacherStub{} storageService := &mock.ChainStorerMock{} header, err := process.GetShardHeader(hash, cacher, nil, storageService) @@ -101,7 +101,7 @@ func TestGetShardHeaderShouldErrNilMarshalizer(t *testing.T) { func TestGetShardHeaderShouldErrNilStorage(t *testing.T) { hash := []byte("X") - cacher := &mock.CacherStub{} + cacher := &mock.HeadersCacherStub{} marshalizer := &mock.MarshalizerMock{} header, err := process.GetShardHeader(hash, cacher, marshalizer, nil) @@ -113,9 +113,9 @@ func TestGetShardHeaderShouldGetHeaderFromPool(t *testing.T) { hash := []byte("X") hdr := &block.Header{Nonce: 1} - cacher := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return hdr, true + cacher := &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (handler data.HeaderHandler, e error) { + return hdr, nil }, } marshalizer := &mock.MarshalizerMock{} @@ -129,9 +129,9 @@ func TestGetShardHeaderShouldGetHeaderFromStorage(t *testing.T) { hash := []byte("X") hdr := &block.Header{Nonce: 1} - cacher := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false + cacher := &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (handler data.HeaderHandler, e error) { + return nil, errors.New("err") }, } marshalizer := &mock.MarshalizerMock{} @@ -166,7 +166,7 @@ func TestGetMetaHeaderShouldErrNilCacher(t *testing.T) { func TestGetMetaHeaderShouldErrNilMarshalizer(t *testing.T) { hash := []byte("X") - cacher := &mock.CacherStub{} + cacher := &mock.HeadersCacherStub{} storageService := &mock.ChainStorerMock{} header, err := process.GetMetaHeader(hash, cacher, nil, storageService) @@ -177,7 +177,7 @@ func TestGetMetaHeaderShouldErrNilMarshalizer(t *testing.T) { func TestGetMetaHeaderShouldErrNilStorage(t *testing.T) { hash := []byte("X") - cacher := &mock.CacherStub{} + cacher := &mock.HeadersCacherStub{} marshalizer := &mock.MarshalizerMock{} header, err := process.GetMetaHeader(hash, cacher, marshalizer, nil) @@ -189,9 +189,9 @@ func TestGetMetaHeaderShouldGetHeaderFromPool(t *testing.T) { hash := []byte("X") hdr := &block.MetaBlock{Nonce: 1} - cacher := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return hdr, true + cacher := &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) 
(handler data.HeaderHandler, e error) { + return hdr, nil }, } marshalizer := &mock.MarshalizerMock{} @@ -205,9 +205,9 @@ func TestGetMetaHeaderShouldGetHeaderFromStorage(t *testing.T) { hash := []byte("X") hdr := &block.MetaBlock{Nonce: 1} - cacher := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false + cacher := &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (handler data.HeaderHandler, e error) { + return nil, errors.New("err") }, } marshalizer := &mock.MarshalizerMock{} @@ -239,9 +239,9 @@ func TestGetShardHeaderFromPoolShouldErrNilCacher(t *testing.T) { func TestGetShardHeaderFromPoolShouldErrMissingHeader(t *testing.T) { hash := []byte("X") - cacher := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false + cacher := &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (handler data.HeaderHandler, e error) { + return nil, errors.New("err") }, } @@ -253,9 +253,9 @@ func TestGetShardHeaderFromPoolShouldErrMissingHeader(t *testing.T) { func TestGetShardHeaderFromPoolShouldErrWrongTypeAssertion(t *testing.T) { hash := []byte("X") - cacher := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return &block.MetaBlock{}, true + cacher := &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (handler data.HeaderHandler, e error) { + return &block.MetaBlock{}, nil }, } @@ -268,9 +268,9 @@ func TestGetShardHeaderFromPoolShouldWork(t *testing.T) { hash := []byte("X") hdr := &block.Header{Nonce: 10} - cacher := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return hdr, true + cacher := &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (handler data.HeaderHandler, e error) { + return hdr, nil }, } @@ -290,9 +290,9 @@ func TestGetMetaHeaderFromPoolShouldErrNilCacher(t *testing.T) { func TestGetMetaHeaderFromPoolShouldErrMissingHeader(t *testing.T) { hash := []byte("X") - cacher := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false + cacher := &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (handler data.HeaderHandler, e error) { + return nil, errors.New("err") }, } @@ -303,10 +303,9 @@ func TestGetMetaHeaderFromPoolShouldErrMissingHeader(t *testing.T) { func TestGetMetaHeaderFromPoolShouldErrWrongTypeAssertion(t *testing.T) { hash := []byte("X") - - cacher := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return &block.Header{}, true + cacher := &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (handler data.HeaderHandler, e error) { + return &block.Header{}, nil }, } @@ -319,9 +318,9 @@ func TestGetMetaHeaderFromPoolShouldWork(t *testing.T) { hash := []byte("X") hdr := &block.MetaBlock{Nonce: 10} - cacher := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return hdr, true + cacher := &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (handler data.HeaderHandler, e error) { + return hdr, nil }, } @@ -604,7 +603,6 @@ func TestGetShardHeaderWithNonceShouldErrNilCacher(t *testing.T) { nonce := uint64(1) shardId := uint32(0) - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{} marshalizer := &mock.MarshalizerMock{} storageService := &mock.ChainStorerMock{} uint64Converter := &mock.Uint64ByteSliceConverterMock{} @@ -613,7 +611,6 @@ func TestGetShardHeaderWithNonceShouldErrNilCacher(t *testing.T) { nonce, 
shardId, nil, - uint64SyncMapCacher, marshalizer, storageService, uint64Converter) @@ -623,35 +620,11 @@ func TestGetShardHeaderWithNonceShouldErrNilCacher(t *testing.T) { assert.Equal(t, process.ErrNilCacher, err) } -func TestGetShardHeaderWithNonceShouldErrNilUint64SyncMapCacher(t *testing.T) { - nonce := uint64(1) - shardId := uint32(0) - - cacher := &mock.CacherStub{} - marshalizer := &mock.MarshalizerMock{} - storageService := &mock.ChainStorerMock{} - uint64Converter := &mock.Uint64ByteSliceConverterMock{} - - header, hash, err := process.GetShardHeaderWithNonce( - nonce, - shardId, - cacher, - nil, - marshalizer, - storageService, - uint64Converter) - - assert.Nil(t, header) - assert.Nil(t, hash) - assert.Equal(t, process.ErrNilUint64SyncMapCacher, err) -} - func TestGetShardHeaderWithNonceShouldErrNilMarshalizer(t *testing.T) { nonce := uint64(1) shardId := uint32(0) - cacher := &mock.CacherStub{} - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{} + cacher := &mock.HeadersCacherStub{} storageService := &mock.ChainStorerMock{} uint64Converter := &mock.Uint64ByteSliceConverterMock{} @@ -659,7 +632,6 @@ func TestGetShardHeaderWithNonceShouldErrNilMarshalizer(t *testing.T) { nonce, shardId, cacher, - uint64SyncMapCacher, nil, storageService, uint64Converter) @@ -673,8 +645,7 @@ func TestGetShardHeaderWithNonceShouldErrNilStorage(t *testing.T) { nonce := uint64(1) shardId := uint32(0) - cacher := &mock.CacherStub{} - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{} + cacher := &mock.HeadersCacherStub{} marshalizer := &mock.MarshalizerMock{} uint64Converter := &mock.Uint64ByteSliceConverterMock{} @@ -682,7 +653,6 @@ func TestGetShardHeaderWithNonceShouldErrNilStorage(t *testing.T) { nonce, shardId, cacher, - uint64SyncMapCacher, marshalizer, nil, uint64Converter) @@ -696,8 +666,7 @@ func TestGetShardHeaderWithNonceShouldErrNilUint64Converter(t *testing.T) { nonce := uint64(1) shardId := uint32(0) - cacher := &mock.CacherStub{} - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{} + cacher := &mock.HeadersCacherStub{} marshalizer := &mock.MarshalizerMock{} storageService := &mock.ChainStorerMock{} @@ -705,7 +674,6 @@ func TestGetShardHeaderWithNonceShouldErrNilUint64Converter(t *testing.T) { nonce, shardId, cacher, - uint64SyncMapCacher, marshalizer, storageService, nil) @@ -721,26 +689,9 @@ func TestGetShardHeaderWithNonceShouldGetHeaderFromPool(t *testing.T) { shardId := uint32(0) hdr := &block.Header{Nonce: nonce} - cacher := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return hdr, true - }, - } - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{ - GetCalled: func(n uint64) (dataRetriever.ShardIdHashMap, bool) { - if n == nonce { - return &mock.ShardIdHasMapMock{ - LoadCalled: func(s uint32) ([]byte, bool) { - if s == shardId { - return hash, true - } - - return nil, false - }, - }, true - } - - return nil, false + cacher := &mock.HeadersCacherStub{ + GetHeaderByNonceAndShardIdCalled: func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + return []data.HeaderHandler{hdr}, [][]byte{hash}, nil }, } marshalizer := &mock.MarshalizerMock{} @@ -751,7 +702,6 @@ func TestGetShardHeaderWithNonceShouldGetHeaderFromPool(t *testing.T) { nonce, shardId, cacher, - uint64SyncMapCacher, marshalizer, storageService, uint64Converter) @@ -766,12 +716,12 @@ func TestGetShardHeaderWithNonceShouldGetHeaderFromStorage(t *testing.T) { shardId := uint32(0) hdr := &block.Header{Nonce: nonce} - cacher := 
&mock.CacherStub{} - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{ - GetCalled: func(nonce uint64) (dataRetriever.ShardIdHashMap, bool) { - return nil, false + cacher := &mock.HeadersCacherStub{ + GetHeaderByNonceAndShardIdCalled: func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + return []data.HeaderHandler{hdr}, [][]byte{hash}, nil }, } + marshalizer := &mock.MarshalizerMock{} marshHdr, _ := marshalizer.Marshal(hdr) storageService := &mock.ChainStorerMock{ @@ -803,7 +753,6 @@ func TestGetShardHeaderWithNonceShouldGetHeaderFromStorage(t *testing.T) { nonce, shardId, cacher, - uint64SyncMapCacher, marshalizer, storageService, uint64Converter) @@ -814,7 +763,6 @@ func TestGetShardHeaderWithNonceShouldGetHeaderFromStorage(t *testing.T) { func TestGetMetaHeaderWithNonceShouldErrNilCacher(t *testing.T) { nonce := uint64(1) - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{} marshalizer := &mock.MarshalizerMock{} storageService := &mock.ChainStorerMock{} uint64Converter := &mock.Uint64ByteSliceConverterMock{} @@ -822,7 +770,6 @@ func TestGetMetaHeaderWithNonceShouldErrNilCacher(t *testing.T) { header, hash, err := process.GetMetaHeaderWithNonce( nonce, nil, - uint64SyncMapCacher, marshalizer, storageService, uint64Converter) @@ -832,39 +779,16 @@ func TestGetMetaHeaderWithNonceShouldErrNilCacher(t *testing.T) { assert.Equal(t, process.ErrNilCacher, err) } -func TestGetMetaHeaderWithNonceShouldErrNilUint64SyncMapCacher(t *testing.T) { - nonce := uint64(1) - - cacher := &mock.CacherStub{} - marshalizer := &mock.MarshalizerMock{} - storageService := &mock.ChainStorerMock{} - uint64Converter := &mock.Uint64ByteSliceConverterMock{} - - header, hash, err := process.GetMetaHeaderWithNonce( - nonce, - cacher, - nil, - marshalizer, - storageService, - uint64Converter) - - assert.Nil(t, header) - assert.Nil(t, hash) - assert.Equal(t, process.ErrNilUint64SyncMapCacher, err) -} - func TestGetMetaHeaderWithNonceShouldErrNilMarshalizer(t *testing.T) { nonce := uint64(1) - cacher := &mock.CacherStub{} - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{} + cacher := &mock.HeadersCacherStub{} storageService := &mock.ChainStorerMock{} uint64Converter := &mock.Uint64ByteSliceConverterMock{} header, hash, err := process.GetMetaHeaderWithNonce( nonce, cacher, - uint64SyncMapCacher, nil, storageService, uint64Converter) @@ -877,15 +801,13 @@ func TestGetMetaHeaderWithNonceShouldErrNilMarshalizer(t *testing.T) { func TestGetMetaHeaderWithNonceShouldErrNilStorage(t *testing.T) { nonce := uint64(1) - cacher := &mock.CacherStub{} - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{} + cacher := &mock.HeadersCacherStub{} marshalizer := &mock.MarshalizerMock{} uint64Converter := &mock.Uint64ByteSliceConverterMock{} header, hash, err := process.GetMetaHeaderWithNonce( nonce, cacher, - uint64SyncMapCacher, marshalizer, nil, uint64Converter) @@ -898,15 +820,13 @@ func TestGetMetaHeaderWithNonceShouldErrNilStorage(t *testing.T) { func TestGetMetaHeaderWithNonceShouldErrNilUint64Converter(t *testing.T) { nonce := uint64(1) - cacher := &mock.CacherStub{} - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{} + cacher := &mock.HeadersCacherStub{} marshalizer := &mock.MarshalizerMock{} storageService := &mock.ChainStorerMock{} header, hash, err := process.GetMetaHeaderWithNonce( nonce, cacher, - uint64SyncMapCacher, marshalizer, storageService, nil) @@ -919,29 +839,11 @@ func TestGetMetaHeaderWithNonceShouldErrNilUint64Converter(t *testing.T) { func 
TestGetMetaHeaderWithNonceShouldGetHeaderFromPool(t *testing.T) { hash := []byte("X") nonce := uint64(1) - shardId := sharding.MetachainShardId hdr := &block.MetaBlock{Nonce: nonce} - cacher := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return hdr, true - }, - } - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{ - GetCalled: func(n uint64) (dataRetriever.ShardIdHashMap, bool) { - if n == nonce { - return &mock.ShardIdHasMapMock{ - LoadCalled: func(s uint32) ([]byte, bool) { - if s == shardId { - return hash, true - } - - return nil, false - }, - }, true - } - - return nil, false + cacher := &mock.HeadersCacherStub{ + GetHeaderByNonceAndShardIdCalled: func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + return []data.HeaderHandler{hdr}, [][]byte{hash}, nil }, } marshalizer := &mock.MarshalizerMock{} @@ -951,7 +853,6 @@ func TestGetMetaHeaderWithNonceShouldGetHeaderFromPool(t *testing.T) { header, _, _ := process.GetMetaHeaderWithNonce( nonce, cacher, - uint64SyncMapCacher, marshalizer, storageService, uint64Converter) @@ -965,10 +866,9 @@ func TestGetMetaHeaderWithNonceShouldGetHeaderFromStorage(t *testing.T) { nonceToByte := []byte("1") hdr := &block.MetaBlock{Nonce: nonce} - cacher := &mock.CacherStub{} - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{ - GetCalled: func(nonce uint64) (dataRetriever.ShardIdHashMap, bool) { - return nil, false + cacher := &mock.HeadersCacherStub{ + GetHeaderByNonceAndShardIdCalled: func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + return []data.HeaderHandler{hdr}, [][]byte{hash}, nil }, } marshalizer := &mock.MarshalizerMock{} @@ -1001,7 +901,6 @@ func TestGetMetaHeaderWithNonceShouldGetHeaderFromStorage(t *testing.T) { header, _, _ := process.GetMetaHeaderWithNonce( nonce, cacher, - uint64SyncMapCacher, marshalizer, storageService, uint64Converter) @@ -1013,41 +912,22 @@ func TestGetShardHeaderFromPoolWithNonceShouldErrNilCacher(t *testing.T) { nonce := uint64(1) shardId := uint32(0) - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{} - - header, hash, err := process.GetShardHeaderFromPoolWithNonce(nonce, shardId, nil, uint64SyncMapCacher) + header, hash, err := process.GetShardHeaderFromPoolWithNonce(nonce, shardId, nil) assert.Nil(t, header) assert.Nil(t, hash) assert.Equal(t, process.ErrNilCacher, err) } -func TestGetShardHeaderFromPoolWithNonceShouldErrNilUint64SyncMapCacher(t *testing.T) { - nonce := uint64(1) - shardId := uint32(0) - - cacher := &mock.CacherStub{} - - header, hash, err := process.GetShardHeaderFromPoolWithNonce(nonce, shardId, cacher, nil) - assert.Nil(t, header) - assert.Nil(t, hash) - assert.Equal(t, process.ErrNilUint64SyncMapCacher, err) -} - func TestGetShardHeaderFromPoolWithNonceShouldErrMissingHashForHeaderNonceWhenShardIdHashMapIsNil(t *testing.T) { nonce := uint64(1) shardId := uint32(0) - cacher := &mock.CacherStub{} - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{ - GetCalled: func(nonce uint64) (dataRetriever.ShardIdHashMap, bool) { - return nil, false - }, - } + cacher := &mock.HeadersCacherStub{} - header, hash, err := process.GetShardHeaderFromPoolWithNonce(nonce, shardId, cacher, uint64SyncMapCacher) + header, hash, err := process.GetShardHeaderFromPoolWithNonce(nonce, shardId, cacher) assert.Nil(t, header) assert.Nil(t, hash) - assert.Equal(t, process.ErrMissingHashForHeaderNonce, err) + assert.Equal(t, process.ErrMissingHeader, err) } func 
TestGetShardHeaderFromPoolWithNonceShouldErrMissingHashForHeaderNonceWhenLoadFromShardIdHashMapFails(t *testing.T) { @@ -1055,25 +935,12 @@ func TestGetShardHeaderFromPoolWithNonceShouldErrMissingHashForHeaderNonceWhenLo nonce := uint64(1) shardId := uint32(0) - cacher := &mock.CacherStub{} - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{ - GetCalled: func(n uint64) (dataRetriever.ShardIdHashMap, bool) { - if n == nonce { - return &mock.ShardIdHasMapMock{ - LoadCalled: func(s uint32) ([]byte, bool) { - return nil, false - }, - }, true - } - - return nil, false - }, - } + cacher := &mock.HeadersCacherStub{} - header, hash, err := process.GetShardHeaderFromPoolWithNonce(nonce, shardId, cacher, uint64SyncMapCacher) + header, hash, err := process.GetShardHeaderFromPoolWithNonce(nonce, shardId, cacher) assert.Nil(t, header) assert.Nil(t, hash) - assert.Equal(t, process.ErrMissingHashForHeaderNonce, err) + assert.Equal(t, process.ErrMissingHeader, err) } func TestGetShardHeaderFromPoolWithNonceShouldErrMissingHeader(t *testing.T) { @@ -1081,30 +948,13 @@ func TestGetShardHeaderFromPoolWithNonceShouldErrMissingHeader(t *testing.T) { nonce := uint64(1) shardId := uint32(0) - cacher := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false + cacher := &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (handler data.HeaderHandler, e error) { + return nil, errors.New("err") }, } - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{ - GetCalled: func(n uint64) (dataRetriever.ShardIdHashMap, bool) { - if n == nonce { - return &mock.ShardIdHasMapMock{ - LoadCalled: func(s uint32) ([]byte, bool) { - if s == shardId { - return hash, true - } - - return nil, false - }, - }, true - } - return nil, false - }, - } - - header, hash, err := process.GetShardHeaderFromPoolWithNonce(nonce, shardId, cacher, uint64SyncMapCacher) + header, hash, err := process.GetShardHeaderFromPoolWithNonce(nonce, shardId, cacher) assert.Nil(t, header) assert.Nil(t, hash) assert.Equal(t, process.ErrMissingHeader, err) @@ -1115,33 +965,16 @@ func TestGetShardHeaderFromPoolWithNonceShouldErrWrongTypeAssertion(t *testing.T nonce := uint64(1) shardId := uint32(0) - cacher := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return &block.MetaBlock{}, true + cacher := &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (handler data.HeaderHandler, e error) { + return &block.MetaBlock{}, nil }, } - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{ - GetCalled: func(n uint64) (dataRetriever.ShardIdHashMap, bool) { - if n == nonce { - return &mock.ShardIdHasMapMock{ - LoadCalled: func(s uint32) ([]byte, bool) { - if s == shardId { - return hash, true - } - - return nil, false - }, - }, true - } - return nil, false - }, - } - - header, hash, err := process.GetShardHeaderFromPoolWithNonce(nonce, shardId, cacher, uint64SyncMapCacher) + header, hash, err := process.GetShardHeaderFromPoolWithNonce(nonce, shardId, cacher) assert.Nil(t, header) assert.Nil(t, hash) - assert.Equal(t, process.ErrWrongTypeAssertion, err) + assert.Equal(t, process.ErrMissingHeader, err) } func TestGetShardHeaderFromPoolWithNonceShouldWork(t *testing.T) { @@ -1150,30 +983,13 @@ func TestGetShardHeaderFromPoolWithNonceShouldWork(t *testing.T) { shardId := uint32(0) hdr := &block.Header{Nonce: nonce} - cacher := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return hdr, true - }, - } - uint64SyncMapCacher := 
&mock.Uint64SyncMapCacherStub{ - GetCalled: func(n uint64) (dataRetriever.ShardIdHashMap, bool) { - if n == nonce { - return &mock.ShardIdHasMapMock{ - LoadCalled: func(s uint32) ([]byte, bool) { - if s == shardId { - return hash, true - } - - return nil, false - }, - }, true - } - - return nil, false + cacher := &mock.HeadersCacherStub{ + GetHeaderByNonceAndShardIdCalled: func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + return []data.HeaderHandler{hdr}, [][]byte{hash}, nil }, } - header, headerHash, err := process.GetShardHeaderFromPoolWithNonce(nonce, shardId, cacher, uint64SyncMapCacher) + header, headerHash, err := process.GetShardHeaderFromPoolWithNonce(nonce, shardId, cacher) assert.Nil(t, err) assert.Equal(t, hash, headerHash) assert.Equal(t, hdr, header) @@ -1182,95 +998,46 @@ func TestGetShardHeaderFromPoolWithNonceShouldWork(t *testing.T) { func TestGetMetaHeaderFromPoolWithNonceShouldErrNilCacher(t *testing.T) { nonce := uint64(1) - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{} - - header, hash, err := process.GetMetaHeaderFromPoolWithNonce(nonce, nil, uint64SyncMapCacher) + header, hash, err := process.GetMetaHeaderFromPoolWithNonce(nonce, nil) assert.Nil(t, header) assert.Nil(t, hash) assert.Equal(t, process.ErrNilCacher, err) } -func TestGetMetaHeaderFromPoolWithNonceShouldErrNilUint64SyncMapCacher(t *testing.T) { - nonce := uint64(1) - - cacher := &mock.CacherStub{} - - header, hash, err := process.GetMetaHeaderFromPoolWithNonce(nonce, cacher, nil) - assert.Nil(t, header) - assert.Nil(t, hash) - assert.Equal(t, process.ErrNilUint64SyncMapCacher, err) -} - func TestGetMetaHeaderFromPoolWithNonceShouldErrMissingHashForHeaderNonceWhenShardIdHashMapIsNil(t *testing.T) { nonce := uint64(1) - cacher := &mock.CacherStub{} - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{ - GetCalled: func(nonce uint64) (dataRetriever.ShardIdHashMap, bool) { - return nil, false - }, - } + cacher := &mock.HeadersCacherStub{} - header, hash, err := process.GetMetaHeaderFromPoolWithNonce(nonce, cacher, uint64SyncMapCacher) + header, hash, err := process.GetMetaHeaderFromPoolWithNonce(nonce, cacher) assert.Nil(t, header) assert.Nil(t, hash) - assert.Equal(t, process.ErrMissingHashForHeaderNonce, err) + assert.Equal(t, process.ErrMissingHeader, err) } func TestGetMetaHeaderFromPoolWithNonceShouldErrMissingHashForHeaderNonceWhenLoadFromShardIdHashMapFails(t *testing.T) { hash := []byte("X") nonce := uint64(1) - cacher := &mock.CacherStub{} - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{ - GetCalled: func(n uint64) (dataRetriever.ShardIdHashMap, bool) { - if n == nonce { - return &mock.ShardIdHasMapMock{ - LoadCalled: func(s uint32) ([]byte, bool) { - return nil, false - }, - }, true - } - - return nil, false - }, - } + cacher := &mock.HeadersCacherStub{} - header, hash, err := process.GetMetaHeaderFromPoolWithNonce(nonce, cacher, uint64SyncMapCacher) + header, hash, err := process.GetMetaHeaderFromPoolWithNonce(nonce, cacher) assert.Nil(t, header) assert.Nil(t, hash) - assert.Equal(t, process.ErrMissingHashForHeaderNonce, err) + assert.Equal(t, process.ErrMissingHeader, err) } func TestGetMetaHeaderFromPoolWithNonceShouldErrMissingHeader(t *testing.T) { hash := []byte("X") nonce := uint64(1) - shardId := sharding.MetachainShardId - - cacher := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false - }, - } - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{ - GetCalled: func(n uint64) 
(dataRetriever.ShardIdHashMap, bool) { - if n == nonce { - return &mock.ShardIdHasMapMock{ - LoadCalled: func(s uint32) ([]byte, bool) { - if s == shardId { - return hash, true - } - - return nil, false - }, - }, true - } - return nil, false + cacher := &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (handler data.HeaderHandler, e error) { + return nil, errors.New("err") }, } - header, hash, err := process.GetMetaHeaderFromPoolWithNonce(nonce, cacher, uint64SyncMapCacher) + header, hash, err := process.GetMetaHeaderFromPoolWithNonce(nonce, cacher) assert.Nil(t, header) assert.Nil(t, hash) assert.Equal(t, process.ErrMissingHeader, err) @@ -1279,32 +1046,14 @@ func TestGetMetaHeaderFromPoolWithNonceShouldErrMissingHeader(t *testing.T) { func TestGetMetaHeaderFromPoolWithNonceShouldErrWrongTypeAssertion(t *testing.T) { hash := []byte("X") nonce := uint64(1) - shardId := sharding.MetachainShardId - cacher := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return &block.Header{}, true - }, - } - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{ - GetCalled: func(n uint64) (dataRetriever.ShardIdHashMap, bool) { - if n == nonce { - return &mock.ShardIdHasMapMock{ - LoadCalled: func(s uint32) ([]byte, bool) { - if s == shardId { - return hash, true - } - - return nil, false - }, - }, true - } - - return nil, false + cacher := &mock.HeadersCacherStub{ + GetHeaderByNonceAndShardIdCalled: func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + return []data.HeaderHandler{&block.Header{}}, [][]byte{hash}, nil }, } - header, hash, err := process.GetMetaHeaderFromPoolWithNonce(nonce, cacher, uint64SyncMapCacher) + header, hash, err := process.GetMetaHeaderFromPoolWithNonce(nonce, cacher) assert.Nil(t, header) assert.Nil(t, hash) assert.Equal(t, process.ErrWrongTypeAssertion, err) @@ -1313,32 +1062,15 @@ func TestGetMetaHeaderFromPoolWithNonceShouldErrWrongTypeAssertion(t *testing.T) func TestGetMetaHeaderFromPoolWithNonceShouldWork(t *testing.T) { hash := []byte("X") nonce := uint64(1) - shardId := sharding.MetachainShardId hdr := &block.MetaBlock{Nonce: nonce} - cacher := &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return hdr, true + cacher := &mock.HeadersCacherStub{ + GetHeaderByNonceAndShardIdCalled: func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + return []data.HeaderHandler{hdr}, [][]byte{hash}, nil }, } - uint64SyncMapCacher := &mock.Uint64SyncMapCacherStub{ - GetCalled: func(n uint64) (dataRetriever.ShardIdHashMap, bool) { - if n == nonce { - return &mock.ShardIdHasMapMock{ - LoadCalled: func(s uint32) ([]byte, bool) { - if s == shardId { - return hash, true - } - - return nil, false - }, - }, true - } - return nil, false - }, - } - header, headerHash, err := process.GetMetaHeaderFromPoolWithNonce(nonce, cacher, uint64SyncMapCacher) + header, headerHash, err := process.GetMetaHeaderFromPoolWithNonce(nonce, cacher) assert.Nil(t, err) assert.Equal(t, hash, headerHash) assert.Equal(t, hdr, header) @@ -1903,7 +1635,8 @@ func TestGetTransactionHandlerShouldErrNilShardedDataCacherNotifier(t *testing.T hash, nil, storageService, - marshalizer) + marshalizer, + false) assert.Nil(t, tx) assert.Equal(t, process.ErrNilShardedDataCacherNotifier, err) @@ -1921,7 +1654,8 @@ func TestGetTransactionHandlerShouldErrNilStorage(t *testing.T) { hash, shardedDataCacherNotifier, nil, - marshalizer) + marshalizer, + false) assert.Nil(t, tx) 
assert.Equal(t, process.ErrNilStorage, err) @@ -1939,7 +1673,8 @@ func TestGetTransactionHandlerShouldErrNilMarshalizer(t *testing.T) { hash, shardedDataCacherNotifier, storageService, - nil) + nil, + false) assert.Nil(t, tx) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -1967,7 +1702,8 @@ func TestGetTransactionHandlerShouldGetTransactionFromPool(t *testing.T) { hash, shardedDataCacherNotifier, storageService, - marshalizer) + marshalizer, + false) assert.Nil(t, err) assert.Equal(t, tx, txFromPool) @@ -2006,7 +1742,8 @@ func TestGetTransactionHandlerShouldGetTransactionFromStorage(t *testing.T) { hash, shardedDataCacherNotifier, storageService, - marshalizer) + marshalizer, + false) assert.Nil(t, err) assert.Equal(t, tx, txFromStorage) @@ -2019,7 +1756,8 @@ func TestGetTransactionHandlerFromPoolShouldErrNilShardedDataCacherNotifier(t *t 0, 0, hash, - nil) + nil, + false) assert.Nil(t, tx) assert.Equal(t, process.ErrNilShardedDataCacherNotifier, err) @@ -2038,7 +1776,8 @@ func TestGetTransactionHandlerFromPoolShouldErrNilStorage(t *testing.T) { 0, 0, hash, - shardedDataCacherNotifier) + shardedDataCacherNotifier, + false) assert.Nil(t, tx) assert.Equal(t, process.ErrNilStorage, err) @@ -2061,7 +1800,8 @@ func TestGetTransactionHandlerFromPoolShouldErrTxNotFound(t *testing.T) { 0, 0, hash, - shardedDataCacherNotifier) + shardedDataCacherNotifier, + false) assert.Nil(t, tx) assert.Equal(t, process.ErrTxNotFound, err) @@ -2084,7 +1824,8 @@ func TestGetTransactionHandlerFromPoolShouldErrInvalidTxInPool(t *testing.T) { 0, 0, hash, - shardedDataCacherNotifier) + shardedDataCacherNotifier, + false) assert.Nil(t, tx) assert.Equal(t, process.ErrInvalidTxInPool, err) @@ -2108,7 +1849,8 @@ func TestGetTransactionHandlerFromPoolShouldWork(t *testing.T) { 0, 0, hash, - shardedDataCacherNotifier) + shardedDataCacherNotifier, + false) assert.Nil(t, err) assert.Equal(t, txFromPool, tx) diff --git a/process/constants.go b/process/constants.go index 4ec2b9615e6..c6cf5fd86fa 100644 --- a/process/constants.go +++ b/process/constants.go @@ -32,12 +32,15 @@ const ( InvalidTransaction ) -// ShardBlockFinality defines the block finality which is used in shards (the real finality in shards is given +// BlockFinality defines the block finality which is used in meta-chain/shards (the real finality in shards is given // by meta-chain) -const ShardBlockFinality = 1 +const BlockFinality = 1 -// MetaBlockFinality defines the block finality which is used in meta-chain -const MetaBlockFinality = 1 +// MetaBlockValidity defines the block validity which is used when checking a metablock +const MetaBlockValidity = 1 + +// EpochChangeGracePeriod defines the number of rounds allowed until the shard has to change the epoch +const EpochChangeGracePeriod = 1 // MaxHeaderRequestsAllowed defines the maximum number of missing cross-shard headers (gaps) which could be requested // in one round, when node processes a received block @@ -47,6 +50,11 @@ const MaxHeaderRequestsAllowed = 10 // (hashes of: mini blocks, txs, meta-headers, shard-headers) which could be added in one block const MaxItemsInBlock = 15000 +// NumTxPerSenderBatchForFillingMiniblock defines the number of transactions to be drawn +// from the transactions pool, for a specific sender, in a single pass. +// Drawing transactions for a miniblock happens in multiple passes, until "MaxItemsInBlock" are drawn. 
+const NumTxPerSenderBatchForFillingMiniblock = 10 + // MinItemsInBlock defines the minimum threshold which could be set, and represents the maximum number of items // (hashes of: mini blocks, txs, meta-headers, shard-headers) which could be added in one block const MinItemsInBlock = 15000 @@ -64,7 +72,7 @@ const MaxRequestsWithTimeoutAllowed = 5 const MaxHeadersToRequestInAdvance = 10 // RoundModulusTrigger defines a round modulus on which a trigger for an action will be released -const RoundModulusTrigger = 10 +const RoundModulusTrigger = 5 // MaxOccupancyPercentageAllowed defines the maximum occupancy percentage allowed to be used, // from the full pool capacity, for the received data which are not needed in the near future @@ -72,7 +80,7 @@ const MaxOccupancyPercentageAllowed = float64(0.9) // MaxRoundsWithoutCommittedBlock defines the maximum rounds to wait for a new block to be committed, // before a special action to be applied -const MaxRoundsWithoutCommittedBlock = 20 +const MaxRoundsWithoutCommittedBlock = 10 // MaxNoncesWithoutCrossNotarized defines the maximum nonces to wait for a new block to be cross notarized, // before a special action to be applied @@ -80,3 +88,22 @@ const MaxNoncesWithoutCrossNotarized = 100 // MinForkRound represents the minimum fork round set by a notarized header received const MinForkRound = uint64(0) + +// TxPoolThresholdEvictSenders instructs the tx pool eviction algorithm not to evict senders, +// unless the number of senders is larger than this threshold +const TxPoolThresholdEvictSenders = uint32(1000) + +// TxPoolNumOldestSendersToEvict instructs the tx pool eviction algorithm to remove this many senders when eviction takes place +const TxPoolNumOldestSendersToEvict = uint32(500) + +// TxPoolALotOfTransactionsForASender instructs the tx pool eviction algorithm to tag a sender with more transactions than this value +// as a "sender with a lot of transactions" +const TxPoolALotOfTransactionsForASender = uint32(500) + +// TxPoolNumTxsToEvictForASenderWithALot instructs the tx pool eviction algorithm to remove this many transactions +// for "a sender with a lot of transactions" when eviction takes place +const TxPoolNumTxsToEvictForASenderWithALot = uint32(100) + +// MaxNonceDifferences represents the maximum nonce difference between a received and a committed header, so that the received one +// can be stored in advance in the block tracker +const MaxNonceDifferences = uint64(10000) diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 7d111d87cc1..eea8964a856 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -1,15 +1,19 @@ package coordinator import ( + "bytes" "sort" "sync" "time" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/logger" + "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/sharding" @@ -22,6 +26,8 @@ type transactionCoordinator struct { shardCoordinator sharding.Coordinator accounts state.AccountsAdapter miniBlockPool storage.Cacher + hasher hashing.Hasher + marshalizer marshal.Marshalizer mutPreProcessor sync.RWMutex txPreProcessors map[block.Type]process.PreProcessor @@ -40,6 +46,8 @@ type transactionCoordinator 
diff --git a/process/coordinator/process.go b/process/coordinator/process.go
index 7d111d87cc1..eea8964a856 100644
--- a/process/coordinator/process.go
+++ b/process/coordinator/process.go
@@ -1,15 +1,19 @@
 package coordinator

 import (
+	"bytes"
 	"sort"
 	"sync"
 	"time"

+	"github.com/ElrondNetwork/elrond-go/core"
 	"github.com/ElrondNetwork/elrond-go/core/check"
 	"github.com/ElrondNetwork/elrond-go/data"
 	"github.com/ElrondNetwork/elrond-go/data/block"
 	"github.com/ElrondNetwork/elrond-go/data/state"
+	"github.com/ElrondNetwork/elrond-go/hashing"
 	"github.com/ElrondNetwork/elrond-go/logger"
+	"github.com/ElrondNetwork/elrond-go/marshal"
 	"github.com/ElrondNetwork/elrond-go/process"
 	"github.com/ElrondNetwork/elrond-go/process/factory"
 	"github.com/ElrondNetwork/elrond-go/sharding"
@@ -22,6 +26,8 @@ type transactionCoordinator struct {
 	shardCoordinator sharding.Coordinator
 	accounts         state.AccountsAdapter
 	miniBlockPool    storage.Cacher
+	hasher           hashing.Hasher
+	marshalizer      marshal.Marshalizer

 	mutPreProcessor sync.RWMutex
 	txPreProcessors map[block.Type]process.PreProcessor
@@ -40,6 +46,8 @@ type transactionCoordinator struct {

 // NewTransactionCoordinator creates a transaction coordinator to run and coordinate preprocessors and processors
 func NewTransactionCoordinator(
+	hasher hashing.Hasher,
+	marshalizer marshal.Marshalizer,
 	shardCoordinator sharding.Coordinator,
 	accounts state.AccountsAdapter,
 	miniBlockPool storage.Cacher,
@@ -70,11 +78,19 @@ func NewTransactionCoordinator(
 	if check.IfNil(gasHandler) {
 		return nil, process.ErrNilGasHandler
 	}
+	if check.IfNil(hasher) {
+		return nil, process.ErrNilHasher
+	}
+	if check.IfNil(marshalizer) {
+		return nil, process.ErrNilMarshalizer
+	}

 	tc := &transactionCoordinator{
 		shardCoordinator: shardCoordinator,
 		accounts:         accounts,
 		gasHandler:       gasHandler,
+		hasher:           hasher,
+		marshalizer:      marshalizer,
 	}

 	tc.miniBlockPool = miniBlockPool
@@ -180,7 +196,6 @@ func (tc *transactionCoordinator) IsDataPreparedForProcessing(haveTime func() ti
 		preproc := tc.getPreProcessor(blockType)
 		if preproc == nil {
 			wg.Done()
-			return
 		}

@@ -354,7 +369,7 @@ func (tc *transactionCoordinator) ProcessBlockTransaction(
 	}

 	preproc := tc.getPreProcessor(blockType)
-	if preproc == nil || preproc.IsInterfaceNil() {
+	if check.IfNil(preproc) {
 		return process.ErrMissingPreProcessor
 	}

@@ -381,8 +396,8 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe
 	nrTxAdded := uint32(0)
 	nrMiniBlocksProcessed := 0

-	if hdr == nil || hdr.IsInterfaceNil() {
-		return miniBlocks, nrTxAdded, true
+	if check.IfNil(hdr) {
+		return miniBlocks, nrTxAdded, false
 	}

 	crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(tc.shardCoordinator.SelfId())
@@ -409,7 +424,7 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe
 		}

 		preproc := tc.getPreProcessor(miniBlock.Type)
-		if preproc == nil || preproc.IsInterfaceNil() {
+		if check.IfNil(preproc) {
 			continue
 		}

@@ -441,6 +456,7 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe
 	}

 	allMBsProcessed := nrMiniBlocksProcessed == len(crossMiniBlockHashes)
+
 	return miniBlocks, nrTxAdded, allMBsProcessed
 }

@@ -708,22 +724,16 @@ func (tc *transactionCoordinator) processCompleteMiniBlock(
 }

 // VerifyCreatedBlockTransactions checks whether the created transactions are the same as the one proposed
-func (tc *transactionCoordinator) VerifyCreatedBlockTransactions(body block.Body) error {
+func (tc *transactionCoordinator) VerifyCreatedBlockTransactions(hdr data.HeaderHandler, body block.Body) error {
 	tc.mutInterimProcessors.RLock()
 	defer tc.mutInterimProcessors.RUnlock()
 	errMutex := sync.Mutex{}
 	var errFound error
-	// TODO: think if it is good in parallel or it is needed in sequences
+
 	wg := sync.WaitGroup{}
 	wg.Add(len(tc.interimProcessors))
-	for key, interimProc := range tc.interimProcessors {
-		if key == block.RewardsBlock {
-			// this has to be processed last
-			wg.Done()
-			continue
-		}
-
+	for _, interimProc := range tc.interimProcessors {
 		go func(intermediateProcessor process.IntermediateTransactionHandler) {
 			err := intermediateProcessor.VerifyInterMiniBlocks(body)
 			if err != nil {
@@ -741,18 +751,45 @@ func (tc *transactionCoordinator) VerifyCreatedBlockTransactions(body block.Body
 		return errFound
 	}

-	interimProc := tc.getInterimProcessor(block.RewardsBlock)
-	if interimProc == nil {
-		return nil
+	if check.IfNil(hdr) {
+		return process.ErrNilBlockHeader
+	}
+
+	createdReceiptHash, err := tc.CreateReceiptsHash()
+	if err != nil {
+		return err
+	}
+
+	if !bytes.Equal(createdReceiptHash, hdr.GetReceiptsHash()) {
+		return process.ErrReceiptsHashMissmatch
 	}

-	return
interimProc.VerifyInterMiniBlocks(body) + return nil +} + +func (tc *transactionCoordinator) CreateReceiptsHash() ([]byte, error) { + allReceiptsHashes := make([]byte, 0) + + for _, value := range tc.keysInterimProcs { + interProc, ok := tc.interimProcessors[value] + if !ok { + continue + } + + mb := interProc.GetCreatedInShardMiniBlock() + currHash, err := core.CalculateHash(tc.marshalizer, tc.hasher, mb) + if err != nil { + return nil, err + } + + allReceiptsHashes = append(allReceiptsHashes, currHash...) + } + + finalReceiptHash, err := core.CalculateHash(tc.marshalizer, tc.hasher, allReceiptsHashes) + return finalReceiptHash, err } // IsInterfaceNil returns true if there is no value under the interface func (tc *transactionCoordinator) IsInterfaceNil() bool { - if tc == nil { - return true - } - return false + return tc == nil } diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 80516824c3d..f883ee70a1d 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -19,7 +19,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/ElrondNetwork/elrond-go/process/factory" @@ -107,14 +107,6 @@ func initDataPool(testHash []byte) *mock.PoolsHolderStub { TransactionsCalled: txCalled, UnsignedTransactionsCalled: unsignedTxHandler, RewardTransactionsCalled: rewardTxCalled, - HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { - return &mock.Uint64SyncMapCacherStub{ - MergeCalled: func(u uint64, hashMap dataRetriever.ShardIdHashMap) {}, - HasCalled: func(nonce uint64, shardId uint32) bool { - return true - }, - } - }, MetaBlocksCalled: func() storage.Cacher { return &mock.CacherStub{ GetCalled: func(key []byte) (value interface{}, ok bool) { @@ -163,9 +155,9 @@ func initDataPool(testHash []byte) *mock.PoolsHolderStub { } return cs }, - HeadersCalled: func() storage.Cacher { - cs := &mock.CacherStub{} - cs.RegisterHandlerCalled = func(i func(key []byte)) { + HeadersCalled: func() dataRetriever.HeadersPool { + cs := &mock.HeadersCacherStub{} + cs.RegisterHandlerCalled = func(i func(header data.HeaderHandler, key []byte)) { } return cs }, @@ -203,11 +195,9 @@ func generateTestCache() storage.Cacher { } func generateTestUnit() storage.Storer { - memDB, _ := memorydb.New() - storer, _ := storageUnit.NewStorageUnit( generateTestCache(), - memDB, + memorydb.New(), ) return storer @@ -222,14 +212,54 @@ func initAccountsMock() *mock.AccountsStub { } } +func TestNewTransactionCoordinator_NilHasher(t *testing.T) { + t.Parallel() + + tc, err := NewTransactionCoordinator( + nil, + &mock.MarshalizerMock{}, + mock.NewMultipleShardsCoordinatorMock(), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock().MiniBlocks(), + &mock.RequestHandlerStub{}, + &mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + &mock.GasHandlerMock{}, + ) + + assert.Nil(t, tc) + assert.Equal(t, process.ErrNilHasher, err) +} + +func TestNewTransactionCoordinator_NilMarshalizer(t *testing.T) { + t.Parallel() + + tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + nil, + mock.NewMultipleShardsCoordinatorMock(), + &mock.AccountsStub{}, + mock.NewPoolsHolderMock().MiniBlocks(), + 
&mock.RequestHandlerStub{}, + &mock.PreProcessorContainerMock{}, + &mock.InterimProcessorContainerMock{}, + &mock.GasHandlerMock{}, + ) + + assert.Nil(t, tc) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + func TestNewTransactionCoordinator_NilShardCoordinator(t *testing.T) { t.Parallel() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, nil, &mock.AccountsStub{}, mock.NewPoolsHolderMock().MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -243,10 +273,12 @@ func TestNewTransactionCoordinator_NilAccountsStub(t *testing.T) { t.Parallel() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), nil, mock.NewPoolsHolderMock().MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -260,10 +292,12 @@ func TestNewTransactionCoordinator_NilDataPool(t *testing.T) { t.Parallel() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, nil, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -277,6 +311,8 @@ func TestNewTransactionCoordinator_NilRequestHandler(t *testing.T) { t.Parallel() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, mock.NewPoolsHolderMock().MiniBlocks(), @@ -294,10 +330,12 @@ func TestNewTransactionCoordinator_NilPreProcessor(t *testing.T) { t.Parallel() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, mock.NewPoolsHolderMock().MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, nil, &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -311,10 +349,12 @@ func TestNewTransactionCoordinator_NilInterProcessor(t *testing.T) { t.Parallel() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, mock.NewPoolsHolderMock().MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.PreProcessorContainerMock{}, nil, &mock.GasHandlerMock{}, @@ -328,10 +368,12 @@ func TestNewTransactionCoordinator_NilGasHandler(t *testing.T) { t.Parallel() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, mock.NewPoolsHolderMock().MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, nil, @@ -345,10 +387,12 @@ func TestNewTransactionCoordinator_OK(t *testing.T) { t.Parallel() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, mock.NewPoolsHolderMock().MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -362,10 +406,12 @@ func TestTransactionCoordinator_SeparateBodyNil(t *testing.T) { t.Parallel() tc, err := 
NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, mock.NewPoolsHolderMock().MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -381,10 +427,12 @@ func TestTransactionCoordinator_SeparateBody(t *testing.T) { t.Parallel() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, mock.NewPoolsHolderMock().MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -416,7 +464,7 @@ func createPreProcessorContainer() process.PreProcessorsContainer { initDataPool([]byte("tx_hash0")), &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{ ProcessTransactionCalled: func(transaction *transaction.Transaction) error { return nil @@ -466,7 +514,7 @@ func createPreProcessorContainerWithDataPool( dataPool, &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{ ProcessTransactionCalled: func(transaction *transaction.Transaction) error { return nil @@ -532,10 +580,12 @@ func TestTransactionCoordinator_CreateBlockStarted(t *testing.T) { totalGasConsumed := uint64(0) tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, mock.NewPoolsHolderMock().MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{ @@ -564,10 +614,12 @@ func TestTransactionCoordinator_CreateMarshalizedDataNilBody(t *testing.T) { t.Parallel() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, mock.NewPoolsHolderMock().MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -604,10 +656,12 @@ func TestTransactionCoordinator_CreateMarshalizedData(t *testing.T) { t.Parallel() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, mock.NewPoolsHolderMock().MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -626,10 +680,12 @@ func TestTransactionCoordinator_CreateMarshalizedDataWithTxsAndScr(t *testing.T) interimContainer := createInterimProcessorContainer() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, mock.NewPoolsHolderMock().MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainer(), interimContainer, &mock.GasHandlerMock{}, @@ -679,10 +735,12 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsDstMeNi t.Parallel() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, 
mock.NewPoolsHolderMock().MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -699,7 +757,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsDstMeNi assert.Equal(t, 0, len(mbs)) assert.Equal(t, uint32(0), txs) - assert.True(t, finalized) + assert.False(t, finalized) } func createTestMetablock() *block.MetaBlock { @@ -728,10 +786,12 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsDstMeNo t.Parallel() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, mock.NewPoolsHolderMock().MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -755,10 +815,12 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsNothing t.Parallel() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, mock.NewPoolsHolderMock().MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -798,7 +860,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactions(t *tes tdp, &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{ ProcessTransactionCalled: func(transaction *transaction.Transaction) error { return nil @@ -829,10 +891,12 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactions(t *tes container, _ := preFactory.Create() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, container, &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{ @@ -911,7 +975,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNothingToPr }, &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{ ProcessTransactionCalled: func(transaction *transaction.Transaction) error { return nil @@ -932,10 +996,12 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNothingToPr container, _ := preFactory.Create() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, mock.NewPoolsHolderMock().MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, container, &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -957,10 +1023,12 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNoTime(t *t t.Parallel() tdp := initDataPool([]byte("tx_hash1")) tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainerWithDataPool(tdp, FeeHandlerMock()), &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -983,10 +1051,12 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNoSpace(t * 
totalGasConsumed := uint64(0) tdp := initDataPool([]byte("tx_hash1")) tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainerWithDataPool(tdp, FeeHandlerMock()), &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{ @@ -1011,7 +1081,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNoSpace(t * func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMe(t *testing.T) { t.Parallel() - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + txPool, _ := txpool.NewShardedTxPool(storageUnit.CacheConfig{Size: 100000, Shards: 1}) tdp := initDataPool([]byte("tx_hash1")) tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return txPool @@ -1019,10 +1089,12 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMe(t *testing nrShards := uint32(5) tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(nrShards), &mock.AccountsStub{}, tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainerWithDataPool(tdp, FeeHandlerMock()), &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -1056,17 +1128,19 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeMultipleMin t.Parallel() nrShards := uint32(5) - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nrShards}) + txPool, _ := txpool.NewShardedTxPool(storageUnit.CacheConfig{Size: 100000, Shards: 1}) tdp := initDataPool([]byte("tx_hash1")) tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return txPool } tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(nrShards), &mock.AccountsStub{}, tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainerWithDataPool(tdp, FeeHandlerMock()), &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -1118,17 +1192,19 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeMultipleMin numMiniBlocks := allTxs / numTxsToAdd nrShards := uint32(5) - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nrShards}) + txPool, _ := txpool.NewShardedTxPool(storageUnit.CacheConfig{Size: 100000, Shards: nrShards}) tdp := initDataPool([]byte("tx_hash1")) tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return txPool } tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(nrShards), &mock.AccountsStub{}, tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainerWithDataPool( tdp, &mock.FeeHandlerStub{ @@ -1185,17 +1261,19 @@ func TestTransactionCoordinator_CompactAndExpandMiniblocksShouldWork(t *testing. 
numMiniBlocks := uint64(numTxsPerBulk / numTxsToAdd) nrShards := uint32(5) - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nrShards}) + txPool, _ := txpool.NewShardedTxPool(storageUnit.CacheConfig{Size: 100000, Shards: 1}) tdp := initDataPool([]byte("tx_hash1")) tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return txPool } tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(nrShards), &mock.AccountsStub{}, tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainerWithDataPool( tdp, &mock.FeeHandlerStub{ @@ -1248,7 +1326,7 @@ func TestTransactionCoordinator_CompactAndExpandMiniblocksShouldWork(t *testing. func TestTransactionCoordinator_GetAllCurrentUsedTxs(t *testing.T) { t.Parallel() - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + txPool, _ := txpool.NewShardedTxPool(storageUnit.CacheConfig{Size: 100000, Shards: 1}) tdp := initDataPool([]byte("tx_hash1")) tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return txPool @@ -1256,10 +1334,12 @@ func TestTransactionCoordinator_GetAllCurrentUsedTxs(t *testing.T) { nrShards := uint32(5) tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(nrShards), &mock.AccountsStub{}, tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainerWithDataPool(tdp, FeeHandlerMock()), &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{ @@ -1304,10 +1384,12 @@ func TestTransactionCoordinator_RequestBlockTransactionsNilBody(t *testing.T) { tdp := initDataPool([]byte("tx_hash1")) nrShards := uint32(5) tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(nrShards), &mock.AccountsStub{}, tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainerWithDataPool(tdp, FeeHandlerMock()), &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -1331,10 +1413,12 @@ func TestTransactionCoordinator_RequestBlockTransactionsRequestOne(t *testing.T) tdp := initDataPool(txHashInPool) nrShards := uint32(5) tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(nrShards), &mock.AccountsStub{}, tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainerWithDataPool(tdp, FeeHandlerMock()), &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -1365,10 +1449,12 @@ func TestTransactionCoordinator_IsDataPreparedForProcessing(t *testing.T) { tdp := initDataPool([]byte("tx_hash1")) nrShards := uint32(5) tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(nrShards), &mock.AccountsStub{}, tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainerWithDataPool(tdp, FeeHandlerMock()), &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -1419,7 +1505,7 @@ func TestTransactionCoordinator_receivedMiniBlockRequestTxs(t *testing.T) { txHash2Requested := int32(0) txHash3Requested := int32(0) - requestHandler := &mock.RequestHandlerMock{} + requestHandler := &mock.RequestHandlerStub{} 
requestHandler.RequestTransactionHandlerCalled = func(destShardID uint32, txHashes [][]byte) { if containsHash(txHashes, txHash1) { atomic.AddInt32(&txHash1Requested, 1) @@ -1453,6 +1539,8 @@ func TestTransactionCoordinator_receivedMiniBlockRequestTxs(t *testing.T) { container, _ := preFactory.Create() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(3), accounts, dataPool.MiniBlocks(), @@ -1478,10 +1566,12 @@ func TestTransactionCoordinator_SaveBlockDataToStorage(t *testing.T) { txHash := []byte("tx_hash1") tdp := initDataPool(txHash) tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(3), initAccountsMock(), tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainerWithDataPool(tdp, FeeHandlerMock()), &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -1515,10 +1605,12 @@ func TestTransactionCoordinator_RestoreBlockDataFromStorage(t *testing.T) { txHash := []byte("tx_hash1") tdp := initDataPool(txHash) tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(3), initAccountsMock(), tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainerWithDataPool(tdp, FeeHandlerMock()), &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -1559,10 +1651,12 @@ func TestTransactionCoordinator_RemoveBlockDataFromPool(t *testing.T) { txHash := []byte("tx_hash1") dataPool := initDataPool(txHash) tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(3), initAccountsMock(), dataPool.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainerWithDataPool(dataPool, FeeHandlerMock()), &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -1597,7 +1691,7 @@ func TestTransactionCoordinator_ProcessBlockTransactionProcessTxError(t *testing dataPool, &mock.AddressConverterMock{}, accounts, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{ ProcessTransactionCalled: func(transaction *transaction.Transaction) error { return process.ErrHigherNonceInTransaction @@ -1622,10 +1716,12 @@ func TestTransactionCoordinator_ProcessBlockTransactionProcessTxError(t *testing container, _ := preFactory.Create() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(3), initAccountsMock(), dataPool.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, container, &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -1666,10 +1762,12 @@ func TestTransactionCoordinator_ProcessBlockTransaction(t *testing.T) { txHash := []byte("tx_hash1") dataPool := initDataPool(txHash) tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(3), initAccountsMock(), dataPool.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainerWithDataPool(dataPool, FeeHandlerMock()), &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{}, @@ -1713,7 +1811,7 @@ func TestTransactionCoordinator_RequestMiniblocks(t *testing.T) { nrCalled := 0 mutex := sync.Mutex{} - requestHandler := &mock.RequestHandlerMock{ + requestHandler := &mock.RequestHandlerStub{ 
RequestMiniBlockHandlerCalled: func(destShardID uint32, miniblockHash []byte) { mutex.Lock() nrCalled++ @@ -1747,6 +1845,8 @@ func TestTransactionCoordinator_RequestMiniblocks(t *testing.T) { container, _ := preFactory.Create() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, shardCoordinator, accounts, dataPool.MiniBlocks(), @@ -1806,15 +1906,15 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNot cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId) dataPool.Transactions().AddData(txHash1, &transaction.Transaction{ Nonce: tx1Nonce, - Data: string(txHash1), + Data: txHash1, }, cacheId) dataPool.Transactions().AddData(txHash2, &transaction.Transaction{ Nonce: tx2Nonce, - Data: string(txHash2), + Data: txHash2, }, cacheId) dataPool.Transactions().AddData(txHash3, &transaction.Transaction{ Nonce: tx3Nonce, - Data: string(txHash3), + Data: txHash3, }, cacheId) tx1ExecutionResult := uint64(0) @@ -1841,17 +1941,17 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNot dataPool, &mock.AddressConverterMock{}, accounts, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{ ProcessTransactionCalled: func(transaction *transaction.Transaction) error { //execution, in this context, means moving the tx nonce to itx corresponding execution result variable - if transaction.Data == string(txHash1) { + if bytes.Equal(transaction.Data, txHash1) { tx1ExecutionResult = transaction.Nonce } - if transaction.Data == string(txHash2) { + if bytes.Equal(transaction.Data, txHash2) { tx2ExecutionResult = transaction.Nonce } - if transaction.Data == string(txHash3) { + if bytes.Equal(transaction.Data, txHash3) { tx3ExecutionResult = transaction.Nonce } @@ -1883,10 +1983,12 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNot container, _ := preFactory.Create() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(3), accounts, dataPool.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, container, &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{ @@ -1942,15 +2044,15 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR cacheId := process.ShardCacherIdentifier(senderShardId, receiverShardId) dataPool.Transactions().AddData(txHash1, &transaction.Transaction{ Nonce: tx1Nonce, - Data: string(txHash1), + Data: txHash1, }, cacheId) dataPool.Transactions().AddData(txHash2, &transaction.Transaction{ Nonce: tx2Nonce, - Data: string(txHash2), + Data: txHash2, }, cacheId) dataPool.Transactions().AddData(txHash3, &transaction.Transaction{ Nonce: tx3Nonce, - Data: string(txHash3), + Data: txHash3, }, cacheId) currentJournalLen := 445 @@ -1977,10 +2079,10 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR dataPool, &mock.AddressConverterMock{}, accounts, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{ ProcessTransactionCalled: func(transaction *transaction.Transaction) error { - if transaction.Data == string(txHash2) { + if bytes.Equal(transaction.Data, txHash2) { return process.ErrHigherNonceInTransaction } return nil @@ -2014,10 +2116,12 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR totalGasConsumed := uint64(0) tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, 
mock.NewMultiShardsCoordinatorMock(3), accounts, dataPool.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, container, &mock.InterimProcessorContainerMock{}, &mock.GasHandlerMock{ @@ -2063,10 +2167,12 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testi container, _ := preFactory.Create() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, shardCoordinator, &mock.AccountsStub{}, tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.PreProcessorContainerMock{}, container, &mock.GasHandlerMock{}, @@ -2074,19 +2180,22 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testi assert.Nil(t, err) assert.NotNil(t, tc) - err = tc.VerifyCreatedBlockTransactions(nil) - assert.Nil(t, err) + err = tc.VerifyCreatedBlockTransactions(&block.Header{ReceiptsHash: []byte("receipt")}, nil) + assert.Equal(t, process.ErrReceiptsHashMissmatch, err) body := block.Body{&block.MiniBlock{Type: block.TxBlock}} - err = tc.VerifyCreatedBlockTransactions(body) - assert.Nil(t, err) + err = tc.VerifyCreatedBlockTransactions(&block.Header{ReceiptsHash: []byte("receipt")}, body) + assert.Equal(t, process.ErrReceiptsHashMissmatch, err) - body = block.Body{&block.MiniBlock{Type: block.SmartContractResultBlock, ReceiverShardID: shardCoordinator.SelfId()}} - err = tc.VerifyCreatedBlockTransactions(body) - assert.Nil(t, err) + body = block.Body{&block.MiniBlock{ + Type: block.SmartContractResultBlock, + ReceiverShardID: shardCoordinator.SelfId(), + SenderShardID: shardCoordinator.SelfId() + 1}} + err = tc.VerifyCreatedBlockTransactions(&block.Header{ReceiptsHash: []byte("receipt")}, body) + assert.Equal(t, process.ErrReceiptsHashMissmatch, err) body = block.Body{&block.MiniBlock{Type: block.SmartContractResultBlock, ReceiverShardID: shardCoordinator.SelfId() + 1}} - err = tc.VerifyCreatedBlockTransactions(body) + err = tc.VerifyCreatedBlockTransactions(&block.Header{ReceiptsHash: []byte("receipt")}, body) assert.Equal(t, process.ErrNilMiniBlocks, err) } @@ -2098,7 +2207,7 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { tdp := initDataPool(txHash) shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) adrConv := &mock.AddressConverterMock{} - preFactory, _ := shard.NewIntermediateProcessorsContainerFactory( + interFactory, _ := shard.NewIntermediateProcessorsContainerFactory( shardCoordinator, &mock.MarshalizerMock{}, &mock.HasherMock{}, @@ -2108,13 +2217,15 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { tdp, economicsData, ) - container, _ := preFactory.Create() + container, _ := interFactory.Create() tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, shardCoordinator, &mock.AccountsStub{}, tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.PreProcessorContainerMock{}, container, &mock.GasHandlerMock{}, @@ -2176,8 +2287,8 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { assert.Nil(t, err) body := block.Body{&block.MiniBlock{Type: block.SmartContractResultBlock, ReceiverShardID: shardCoordinator.SelfId() + 1, TxHashes: [][]byte{scrHash}}} - err = tc.VerifyCreatedBlockTransactions(body) - assert.Nil(t, err) + err = tc.VerifyCreatedBlockTransactions(&block.Header{}, body) + assert.Equal(t, process.ErrReceiptsHashMissmatch, err) } func TestTransactionCoordinator_SaveBlockDataToStorageSaveIntermediateTxsErrors(t 
*testing.T) { @@ -2187,10 +2298,12 @@ func TestTransactionCoordinator_SaveBlockDataToStorageSaveIntermediateTxsErrors( tdp := initDataPool(txHash) retError := errors.New("save error") tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(3), initAccountsMock(), tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainerWithDataPool(tdp, FeeHandlerMock()), &mock.InterimProcessorContainerMock{ KeysCalled: func() []block.Type { @@ -2229,10 +2342,12 @@ func TestTransactionCoordinator_SaveBlockDataToStorageCallsSaveIntermediate(t *t tdp := initDataPool(txHash) intermediateTxWereSaved := false tc, err := NewTransactionCoordinator( + &mock.HasherMock{}, + &mock.MarshalizerMock{}, mock.NewMultiShardsCoordinatorMock(3), initAccountsMock(), tdp.MiniBlocks(), - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, createPreProcessorContainerWithDataPool(tdp, FeeHandlerMock()), &mock.InterimProcessorContainerMock{ KeysCalled: func() []block.Type { diff --git a/process/coordinator/transactionType_test.go b/process/coordinator/transactionType_test.go index af177bf6cfb..ab2e766d1cc 100644 --- a/process/coordinator/transactionType_test.go +++ b/process/coordinator/transactionType_test.go @@ -174,7 +174,7 @@ func TestTxTypeHandler_ComputeTransactionTypeScDeployment(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = make([]byte, addressConverter.AddressLen()) - tx.Data = "data" + tx.Data = []byte("data") tx.Value = big.NewInt(45) txType, err := tth.ComputeTransactionType(tx) @@ -190,7 +190,7 @@ func TestTxTypeHandler_ComputeTransactionTypeScInvoking(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = generateRandomByteSlice(addrConverter.AddressLen()) - tx.Data = "data" + tx.Data = []byte("data") tx.Value = big.NewInt(45) _, acntDst := createAccounts(tx) @@ -221,7 +221,7 @@ func TestTxTypeHandler_ComputeTransactionTypeMoveBalance(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = generateRandomByteSlice(addrConverter.AddressLen()) - tx.Data = "data" + tx.Data = []byte("data") tx.Value = big.NewInt(45) _, acntDst := createAccounts(tx) diff --git a/process/economics/economicsData.go b/process/economics/economicsData.go index d574e61b8e3..b683bf7ecca 100644 --- a/process/economics/economicsData.go +++ b/process/economics/economicsData.go @@ -11,17 +11,20 @@ import ( // EconomicsData will store information about economics type EconomicsData struct { - rewardsValue *big.Int - communityPercentage float64 - leaderPercentage float64 - burnPercentage float64 - maxGasLimitPerBlock uint64 - minGasPrice uint64 - minGasLimit uint64 - communityAddress string - burnAddress string - stakeValue *big.Int - unBoundPeriod uint64 + rewardsValue *big.Int + communityPercentage float64 + leaderPercentage float64 + burnPercentage float64 + maxGasLimitPerBlock uint64 + gasPerDataByte uint64 + dataLimitForBaseCalc uint64 + minGasPrice uint64 + minGasLimit uint64 + communityAddress string + burnAddress string + stakeValue *big.Int + unBoundPeriod uint64 + ratingsData *RatingsData } const float64EqualityThreshold = 1e-9 @@ -44,22 +47,30 @@ func NewEconomicsData(economics *config.ConfigEconomics) (*EconomicsData, error) return nil, err } + rd, err := NewRatingsData(economics.RatingSettings) + if err != nil { + return nil, err + } + if data.maxGasLimitPerBlock < data.minGasLimit { return nil, process.ErrInvalidMaxGasLimitPerBlock } return &EconomicsData{ - rewardsValue: 
data.rewardsValue, - communityPercentage: economics.RewardsSettings.CommunityPercentage, - leaderPercentage: economics.RewardsSettings.LeaderPercentage, - burnPercentage: economics.RewardsSettings.BurnPercentage, - maxGasLimitPerBlock: data.maxGasLimitPerBlock, - minGasPrice: data.minGasPrice, - minGasLimit: data.minGasLimit, - communityAddress: economics.EconomicsAddresses.CommunityAddress, - burnAddress: economics.EconomicsAddresses.BurnAddress, - stakeValue: data.stakeValue, - unBoundPeriod: data.unBoundPeriod, + rewardsValue: data.rewardsValue, + communityPercentage: economics.RewardsSettings.CommunityPercentage, + leaderPercentage: economics.RewardsSettings.LeaderPercentage, + burnPercentage: economics.RewardsSettings.BurnPercentage, + maxGasLimitPerBlock: data.maxGasLimitPerBlock, + minGasPrice: data.minGasPrice, + minGasLimit: data.minGasLimit, + communityAddress: economics.EconomicsAddresses.CommunityAddress, + burnAddress: economics.EconomicsAddresses.BurnAddress, + stakeValue: data.stakeValue, + unBoundPeriod: data.unBoundPeriod, + gasPerDataByte: data.gasPerDataByte, + dataLimitForBaseCalc: data.dataLimitForBaseCalc, + ratingsData: rd, }, nil } @@ -99,13 +110,25 @@ func convertValues(economics *config.ConfigEconomics) (*EconomicsData, error) { return nil, process.ErrInvalidMaxGasLimitPerBlock } + gasPerDataByte, err := strconv.ParseUint(economics.FeeSettings.GasPerDataByte, conversionBase, bitConversionSize) + if err != nil { + return nil, process.ErrInvalidGasPerDataByte + } + + dataLimitForBaseCalc, err := strconv.ParseUint(economics.FeeSettings.DataLimitForBaseCalc, conversionBase, bitConversionSize) + if err != nil { + return nil, process.ErrInvalidGasPerDataByte + } + return &EconomicsData{ - rewardsValue: rewardsValue, - minGasPrice: minGasPrice, - minGasLimit: minGasLimit, - stakeValue: stakeValue, - unBoundPeriod: unBoundPeriod, - maxGasLimitPerBlock: maxGasLimitPerBlock, + rewardsValue: rewardsValue, + minGasPrice: minGasPrice, + minGasLimit: minGasLimit, + stakeValue: stakeValue, + unBoundPeriod: unBoundPeriod, + maxGasLimitPerBlock: maxGasLimitPerBlock, + gasPerDataByte: gasPerDataByte, + dataLimitForBaseCalc: dataLimitForBaseCalc, }, nil } @@ -191,10 +214,15 @@ func (ed *EconomicsData) MaxGasLimitPerBlock() uint64 { func (ed *EconomicsData) ComputeGasLimit(tx process.TransactionWithFeeHandler) uint64 { gasLimit := ed.minGasLimit - //TODO: change this method of computing the gas limit of a notarizing tx - // it should follow an exponential curve as to disincentivise notarizing large data - // also, take into account if destination address is 0000...00000 as this will be a SC deploy tx - gasLimit += uint64(len(tx.GetData())) + dataLen := uint64(len(tx.GetData())) + gasLimit += dataLen * ed.gasPerDataByte + //TODO reevaluate the formula or delete + /* if dataLen < ed.dataLimitForBaseCalc || core.IsEmptyAddress(tx.GetRecvAddress()) { + return gasLimit + } + + overDataLimit := dataLen - ed.dataLimitForBaseCalc + gasLimit += overDataLimit * overDataLimit * ed.gasPerDataByte */ return gasLimit } @@ -226,3 +254,8 @@ func (ed *EconomicsData) IsInterfaceNil() bool { } return false } + +// RatingsData will return the ratingsDataObject +func (ed *EconomicsData) RatingsData() *RatingsData { + return ed.ratingsData +} diff --git a/process/economics/economicsData_test.go b/process/economics/economicsData_test.go index 72dd1d69d24..d1f86d7c3a4 100644 --- a/process/economics/economicsData_test.go +++ b/process/economics/economicsData_test.go @@ -13,6 +13,13 @@ import ( 
"github.com/stretchr/testify/assert" ) +const ( + validatorIncreaseRatingStep = uint32(2) + validatorDecreaseRatingStep = uint32(4) + proposerIncreaseRatingStep = uint32(1) + proposerDecreaseRatingStep = uint32(2) +) + func createDummyEconomicsConfig() *config.ConfigEconomics { return &config.ConfigEconomics{ EconomicsAddresses: config.EconomicsAddresses{ @@ -26,14 +33,25 @@ func createDummyEconomicsConfig() *config.ConfigEconomics { BurnPercentage: 0.8, }, FeeSettings: config.FeeSettings{ - MaxGasLimitPerBlock: "100000", - MinGasPrice: "18446744073709551615", - MinGasLimit: "500", + MaxGasLimitPerBlock: "100000", + MinGasPrice: "18446744073709551615", + MinGasLimit: "500", + GasPerDataByte: "1", + DataLimitForBaseCalc: "100000000", }, ValidatorSettings: config.ValidatorSettings{ StakeValue: "500000000", UnBoundPeriod: "100000", }, + RatingSettings: config.RatingSettings{ + StartRating: 50, + MaxRating: 100, + MinRating: 1, + ProposerDecreaseRatingStep: proposerDecreaseRatingStep, + ProposerIncreaseRatingStep: proposerIncreaseRatingStep, + ValidatorDecreaseRatingStep: validatorDecreaseRatingStep, + ValidatorIncreaseRatingStep: validatorIncreaseRatingStep, + }, } } @@ -276,7 +294,7 @@ func TestEconomicsData_ComputeFeeWithTxData(t *testing.T) { tx := &transaction.Transaction{ GasPrice: gasPrice, GasLimit: minGasLimit, - Data: txData, + Data: []byte(txData), } cost := economicsData.ComputeFee(tx) @@ -340,7 +358,7 @@ func TestEconomicsData_TxWithHigherGasLimitShouldErr(t *testing.T) { tx := &transaction.Transaction{ GasPrice: minGasPrice, GasLimit: minGasLimit + 1, - Data: "1", + Data: []byte("1"), } err := economicsData.CheckValidityTxValues(tx) @@ -413,3 +431,82 @@ func TestEconomicsData_BurnAddress(t *testing.T) { value := economicsData.BurnAddress() assert.Equal(t, burnAddress, value) } + +func TestEconomicsData_RatingsDataMinGreaterMaxShouldErr(t *testing.T) { + t.Parallel() + + economicsConfig := createDummyEconomicsConfig() + economicsConfig.RatingSettings.MinRating = 10 + economicsConfig.RatingSettings.MaxRating = 8 + economicsData, err := economics.NewEconomicsData(economicsConfig) + + assert.Nil(t, economicsData) + assert.Equal(t, process.ErrMaxRatingIsSmallerThanMinRating, err) +} + +func TestEconomicsData_RatingsDataMinSmallerThanOne(t *testing.T) { + t.Parallel() + + economicsConfig := createDummyEconomicsConfig() + economicsConfig.RatingSettings.MinRating = 0 + economicsConfig.RatingSettings.MaxRating = 8 + economicsData, err := economics.NewEconomicsData(economicsConfig) + + assert.Nil(t, economicsData) + assert.Equal(t, process.ErrMinRatingSmallerThanOne, err) +} + +func TestEconomicsData_RatingsStartGreaterMaxShouldErr(t *testing.T) { + t.Parallel() + + economicsConfig := createDummyEconomicsConfig() + economicsConfig.RatingSettings.MinRating = 10 + economicsConfig.RatingSettings.MaxRating = 100 + economicsConfig.RatingSettings.StartRating = 110 + economicsData, err := economics.NewEconomicsData(economicsConfig) + + assert.Nil(t, economicsData) + assert.Equal(t, process.ErrStartRatingNotBetweenMinAndMax, err) +} + +func TestEconomicsData_RatingsStartLowerMinShouldErr(t *testing.T) { + t.Parallel() + + economicsConfig := createDummyEconomicsConfig() + economicsConfig.RatingSettings.MinRating = 10 + economicsConfig.RatingSettings.MaxRating = 100 + economicsConfig.RatingSettings.StartRating = 5 + economicsData, err := economics.NewEconomicsData(economicsConfig) + + assert.Nil(t, economicsData) + assert.Equal(t, process.ErrStartRatingNotBetweenMinAndMax, err) +} + +func 
TestEconomicsData_RatingsCorrectValues(t *testing.T) {
+	t.Parallel()
+
+	minRating := uint32(10)
+	maxRating := uint32(100)
+	startRating := uint32(50)
+
+	economicsConfig := createDummyEconomicsConfig()
+	economicsConfig.RatingSettings.MinRating = minRating
+	economicsConfig.RatingSettings.MaxRating = maxRating
+	economicsConfig.RatingSettings.StartRating = startRating
+	economicsConfig.RatingSettings.ProposerDecreaseRatingStep = proposerDecreaseRatingStep
+	economicsConfig.RatingSettings.ProposerIncreaseRatingStep = proposerIncreaseRatingStep
+	economicsConfig.RatingSettings.ValidatorIncreaseRatingStep = validatorIncreaseRatingStep
+	economicsConfig.RatingSettings.ValidatorDecreaseRatingStep = validatorDecreaseRatingStep
+
+	economicsData, err := economics.NewEconomicsData(economicsConfig)
+
+	assert.Nil(t, err)
+	assert.NotNil(t, economicsData)
+	assert.Equal(t, startRating, economicsData.RatingsData().StartRating())
+	assert.Equal(t, minRating, economicsData.RatingsData().MinRating())
+	assert.Equal(t, maxRating, economicsData.RatingsData().MaxRating())
+	assert.Equal(t, validatorIncreaseRatingStep, economicsData.RatingsData().ValidatorIncreaseRatingStep())
+	assert.Equal(t, validatorDecreaseRatingStep, economicsData.RatingsData().ValidatorDecreaseRatingStep())
+	assert.Equal(t, proposerIncreaseRatingStep, economicsData.RatingsData().ProposerIncreaseRatingStep())
+	assert.Equal(t, proposerDecreaseRatingStep, economicsData.RatingsData().ProposerDecreaseRatingStep())
+}
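For context before the new file below: NewRatingsData rejects any configuration where MinRating is below 1, MinRating exceeds MaxRating, or StartRating falls outside [MinRating, MaxRating]. A minimal, standalone usage sketch follows; the literal settings values are illustrative only and mirror the test above.

package main

import (
	"fmt"

	"github.com/ElrondNetwork/elrond-go/config"
	"github.com/ElrondNetwork/elrond-go/process/economics"
)

func main() {
	// A start rating outside [MinRating, MaxRating] is rejected by the constructor.
	_, err := economics.NewRatingsData(config.RatingSettings{
		StartRating: 110,
		MinRating:   10,
		MaxRating:   100,
	})
	fmt.Println(err) // start rating is not between min and max rating

	// A consistent configuration succeeds and exposes the configured steps through getters.
	rd, _ := economics.NewRatingsData(config.RatingSettings{
		StartRating:                 50,
		MinRating:                   1,
		MaxRating:                   100,
		ProposerIncreaseRatingStep:  1,
		ProposerDecreaseRatingStep:  2,
		ValidatorIncreaseRatingStep: 2,
		ValidatorDecreaseRatingStep: 4,
	})
	fmt.Println(rd.StartRating(), rd.ProposerIncreaseRatingStep())
}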
diff --git a/process/economics/ratingsData.go b/process/economics/ratingsData.go
new file mode 100644
index 00000000000..0ed3f1857e2
--- /dev/null
+++ b/process/economics/ratingsData.go
@@ -0,0 +1,77 @@
+package economics
+
+import (
+	"github.com/ElrondNetwork/elrond-go/config"
+	"github.com/ElrondNetwork/elrond-go/process"
+)
+
+// RatingsData will store information about ratings computation
+type RatingsData struct {
+	startRating                 uint32
+	maxRating                   uint32
+	minRating                   uint32
+	proposerIncreaseRatingStep  uint32
+	proposerDecreaseRatingStep  uint32
+	validatorIncreaseRatingStep uint32
+	validatorDecreaseRatingStep uint32
+}
+
+// NewRatingsData creates a new RatingsData instance
+func NewRatingsData(
+	settings config.RatingSettings,
+) (*RatingsData, error) {
+	if settings.MinRating < 1 {
+		return nil, process.ErrMinRatingSmallerThanOne
+	}
+	if settings.MinRating > settings.MaxRating {
+		return nil, process.ErrMaxRatingIsSmallerThanMinRating
+	}
+	if settings.MaxRating < settings.StartRating || settings.MinRating > settings.StartRating {
+		return nil, process.ErrStartRatingNotBetweenMinAndMax
+	}
+
+	return &RatingsData{
+		startRating:                 settings.StartRating,
+		maxRating:                   settings.MaxRating,
+		minRating:                   settings.MinRating,
+		proposerIncreaseRatingStep:  settings.ProposerIncreaseRatingStep,
+		proposerDecreaseRatingStep:  settings.ProposerDecreaseRatingStep,
+		validatorIncreaseRatingStep: settings.ValidatorIncreaseRatingStep,
+		validatorDecreaseRatingStep: settings.ValidatorDecreaseRatingStep,
+	}, nil
+}
+
+// StartRating will return the start rating
+func (rd *RatingsData) StartRating() uint32 {
+	return rd.startRating
+}
+
+// MaxRating will return the max rating
+func (rd *RatingsData) MaxRating() uint32 {
+	return rd.maxRating
+}
+
+// MinRating will return the min rating
+func (rd *RatingsData) MinRating() uint32 {
+	return rd.minRating
+}
+
+// ProposerIncreaseRatingStep will return the rating step increase for proposer
+func (rd *RatingsData) ProposerIncreaseRatingStep() uint32 {
+	return rd.proposerIncreaseRatingStep
+}
+
+// ProposerDecreaseRatingStep will return the rating step decrease for proposer
+func (rd *RatingsData) ProposerDecreaseRatingStep() uint32 {
+	return rd.proposerDecreaseRatingStep
+}
+
+// ValidatorIncreaseRatingStep will return the rating step increase for validator
+func (rd *RatingsData) ValidatorIncreaseRatingStep() uint32 {
+	return rd.validatorIncreaseRatingStep
+}
+
+// ValidatorDecreaseRatingStep will return the rating step decrease for validator
+func (rd *RatingsData) ValidatorDecreaseRatingStep() uint32 {
+	return rd.validatorDecreaseRatingStep
+}
diff --git a/process/economics/testEconomicsData.go b/process/economics/testEconomicsData.go
index 9daad008dfe..acd626bcb69 100644
--- a/process/economics/testEconomicsData.go
+++ b/process/economics/testEconomicsData.go
@@ -24,6 +24,11 @@ func (ted *TestEconomicsData) SetMinGasLimit(minGasLimit uint64) {
 	ted.minGasLimit = minGasLimit
 }

+// GetMinGasLimit returns the minimum gas limit for a transaction to be accepted
+func (ted *TestEconomicsData) GetMinGasLimit() uint64 {
+	return ted.minGasLimit
+}
+
 // SetRewards sets the new reward value
 func (ted *TestEconomicsData) SetRewards(value *big.Int) {
 	ted.rewardsValue = value
@@ -33,3 +38,13 @@ func (ted *TestEconomicsData) SetRewards(value *big.Int) {
 func (ted *TestEconomicsData) GetMinGasPrice() uint64 {
 	return ted.minGasPrice
 }
+
+// SetGasPerDataByte sets gas per data byte for a transaction to be accepted
+func (ted *TestEconomicsData) SetGasPerDataByte(gasPerDataByte uint64) {
+	ted.gasPerDataByte = gasPerDataByte
+}
+
+// SetDataLimitForBaseCalc sets base calc limit for gasLimit calculation
+func (ted *TestEconomicsData) SetDataLimitForBaseCalc(dataLimitForBaseCalc uint64) {
+	ted.dataLimitForBaseCalc = dataLimitForBaseCalc
+}
diff --git a/process/errors.go b/process/errors.go
index 59d61279480..96cd86f3b46 100644
--- a/process/errors.go
+++ b/process/errors.go
@@ -271,6 +271,9 @@ var ErrLenMismatch = errors.New("lengths mismatch")
 // ErrWrongTypeAssertion signals that an type assertion failed
 var ErrWrongTypeAssertion = errors.New("wrong type assertion")

+// ErrHeaderShardDataMismatch signals that shard header does not match created shard info
+var ErrHeaderShardDataMismatch = errors.New("shard header does not match shard info")
+
 // ErrNoDataInMessage signals that no data was found after parsing received p2p message
 var ErrNoDataInMessage = errors.New("no data found in received message")

@@ -283,9 +286,6 @@ var ErrNilRandSeed = errors.New("provided rand seed is nil")
 // ErrNilPrevRandSeed signals that a nil previous rand seed has been provided
 var ErrNilPrevRandSeed = errors.New("provided previous rand seed is nil")

-// ErrNilRequestHeaderHandlerByNonce signals that a nil header request handler by nonce func was provided
-var ErrNilRequestHeaderHandlerByNonce = errors.New("nil request header handler by nonce")
-
 // ErrLowerRoundInBlock signals that a header round is too low for processing it
 var ErrLowerRoundInBlock = errors.New("header round is lower than last committed")

@@ -301,8 +301,11 @@ var ErrShardIdMissmatch = errors.New("shard ID missmatch")
 // ErrMintAddressNotInThisShard signals that the mint address does not belong to current shard
 var ErrMintAddressNotInThisShard = errors.New("mint address does not belong to current shard")

-// ErrNotarizedHdrsSliceIsNil signals that the slice holding last notarized headers is nil
-var ErrNotarizedHdrsSliceIsNil = errors.New("notarized shard headers slice is nil")
+// ErrNotarizedHeadersSliceIsNil
signals that the slice holding notarized headers is nil +var ErrNotarizedHeadersSliceIsNil = errors.New("notarized headers slice is nil") + +// ErrNotarizedHeadersSliceForShardIsNil signals that the slice holding notarized headers for shard is nil +var ErrNotarizedHeadersSliceForShardIsNil = errors.New("notarized headers slice for shard is nil") // ErrCrossShardMBWithoutConfirmationFromMeta signals that miniblock was not yet notarized by metachain var ErrCrossShardMBWithoutConfirmationFromMeta = errors.New("cross shard miniblock with destination current shard is not confirmed by metachain") @@ -343,9 +346,6 @@ var ErrNilRewardTransaction = errors.New("reward transaction is nil") // ErrRewardTransactionNotFound is raised when reward transaction should be present but was not found var ErrRewardTransactionNotFound = errors.New("reward transaction not found") -// ErrInvalidDataInput signals that the data input is invalid for parsing -var ErrInvalidDataInput = errors.New("data input is invalid to create key, value storage output") - // ErrNilUTxDataPool signals that unsigned transaction pool is nil var ErrNilUTxDataPool = errors.New("unsigned transactions pool is nil") @@ -439,6 +439,12 @@ var ErrInvalidPeerAccount = errors.New("invalid peer account") // ErrInvalidMetaHeader signals that a wrong implementation of HeaderHandler was provided var ErrInvalidMetaHeader = errors.New("invalid header provided, expected MetaBlock") +// ErrNilEpochStartTrigger signals that a nil start of epoch trigger was provided +var ErrNilEpochStartTrigger = errors.New("nil start of epoch trigger") + +// ErrEpochDoesNotMatch signals that epoch does not match between headers +var ErrEpochDoesNotMatch = errors.New("epoch does not match") + // ErrNotEnoughArgumentsToDeploy signals that there are not enough arguments to deploy the smart contract var ErrNotEnoughArgumentsToDeploy = errors.New("not enough arguments to deploy the smart contract") @@ -454,6 +460,9 @@ var ErrNilTxValidator = errors.New("nil transaction validator") // ErrNilHdrValidator signals that a nil header validator has been provided var ErrNilHdrValidator = errors.New("nil header validator") +// ErrNilPendingMiniBlocksHandler signals that a nil pending miniblocks handler has been provided +var ErrNilPendingMiniBlocksHandler = errors.New("nil pending miniblocks handler") + // ErrMiniblockNotForCurrentShard signals that the current processing miniblock must not be // processed on the current shard var ErrMiniblockNotForCurrentShard = errors.New("miniblock is not addressed for current shard") @@ -470,6 +479,9 @@ var ErrNilEconomicsRewardsHandler = errors.New("nil economics rewards handler") // ErrNilEconomicsFeeHandler signals that fee handler is nil var ErrNilEconomicsFeeHandler = errors.New("nil economics fee handler") +// ErrNilDatabase signals that the database is nil +var ErrNilDatabase = errors.New("nil database") + // ErrSystemBusy signals that the system is busy var ErrSystemBusy = errors.New("system busy") @@ -485,6 +497,9 @@ var ErrHigherGasLimitRequiredInTx = errors.New("higher gas limit required in tx" // ErrInvalidMaxGasLimitPerBlock signals that an invalid max gas limit per block has been read from config file var ErrInvalidMaxGasLimitPerBlock = errors.New("invalid max gas limit per block") +// ErrInvalidGasPerDataByte signals that an invalid gas per data byte has been read from config file +var ErrInvalidGasPerDataByte = errors.New("invalid gas per data byte") + // ErrMaxGasLimitPerMiniBlockInSenderShardIsReached signals that max gas limit 
per mini block in sender shard has been reached var ErrMaxGasLimitPerMiniBlockInSenderShardIsReached = errors.New("max gas limit per mini block in sender shard is reached") @@ -539,15 +554,36 @@ var ErrNilMiniBlocksCompacter = errors.New("nil mini blocks compacter") // ErrNilBlackListHandler signals that a nil black list handler was provided var ErrNilBlackListHandler = errors.New("nil black list handler") +// ErrNilBlockTracker signals that a nil block tracker was provided +var ErrNilBlockTracker = errors.New("nil block tracker") + // ErrHeaderIsBlackListed signals that the header provided is black listed var ErrHeaderIsBlackListed = errors.New("header is black listed") // ErrNilEconomicsData signals that nil economics data has been provided var ErrNilEconomicsData = errors.New("nil economics data") +// ErrNilRater signals that nil rater has been provided +var ErrNilRater = errors.New("nil rater") + +// ErrNilRatingReader signals that nil rating reader has been provided +var ErrNilRatingReader = errors.New("nil rating reader") + // ErrNilNetworkWatcher signals that a nil network watcher has been provided var ErrNilNetworkWatcher = errors.New("nil network watcher") +// ErrNilHeaderValidator signals that nil header validator has been provided +var ErrNilHeaderValidator = errors.New("nil header validator") + +// ErrMaxRatingIsSmallerThanMinRating signals that the max rating is smaller than the min rating value +var ErrMaxRatingIsSmallerThanMinRating = errors.New("max rating is smaller than min rating") + +// ErrMinRatingSmallerThanOne signals that the min rating is smaller than the min value of 1 +var ErrMinRatingSmallerThanOne = errors.New("min rating is smaller than one") + +// ErrStartRatingNotBetweenMinAndMax signals that the start rating is not between min and max rating +var ErrStartRatingNotBetweenMinAndMax = errors.New("start rating is not between min and max rating") + // ErrMissingPrevShardData signals that a required shard data information is missing var ErrMissingPrevShardData = errors.New("shard data is missing") @@ -569,5 +605,26 @@ var ErrNotEnoughGas = errors.New("not enough gas was sent in the transaction") // ErrNilHeaderSigVerifier signals that a nil header sig verifier has been provided var ErrNilHeaderSigVerifier = errors.New("nil header sig verifier") +// ErrFailedTransaction signals that transaction is of type failed. 
+var ErrFailedTransaction = errors.New("failed transaction, gas consumed") + +// ErrNilBadTxHandler signals that bad tx handler is nil +var ErrNilBadTxHandler = errors.New("nil bad tx handler") + +// ErrNilReceiptHandler signals that receipt handler is nil +var ErrNilReceiptHandler = errors.New("nil receipt handler") + +// ErrTooManyReceiptsMiniBlocks signals that there were too many receipts miniblocks created +var ErrTooManyReceiptsMiniBlocks = errors.New("too many receipts miniblocks") + +// ErrReceiptsHashMissmatch signals that overall receipts has does not match +var ErrReceiptsHashMissmatch = errors.New("receipts hash missmatch") + +// ErrMiniBlockNumMissMatch signals that number of miniblocks does not match +var ErrMiniBlockNumMissMatch = errors.New("num miniblocks does not match") + // ErrInvalidChainID signals that an invalid chain ID has been provided var ErrInvalidChainID = errors.New("invalid chain ID while processing") + +// ErrEpochStartDataDoesNotMatch signals that EpochStartData is not the same as the leader created +var ErrEpochStartDataDoesNotMatch = errors.New("epoch start data does not match") diff --git a/process/factory/factory.go b/process/factory/factory.go index bf065bc3363..6785e364826 100644 --- a/process/factory/factory.go +++ b/process/factory/factory.go @@ -7,16 +7,16 @@ const ( UnsignedTransactionTopic = "unsignedTransactions" // RewardsTransactionTopic is the topic used for sharing fee transactions RewardsTransactionTopic = "rewardsTransactions" - // HeadersTopic is the topic used for sharing block headers - HeadersTopic = "headers" + // ShardBlocksTopic is the topic used for sharing block headers + ShardBlocksTopic = "shardBlocks" // MiniBlocksTopic is the topic used for sharing mini blocks MiniBlocksTopic = "txBlockBodies" - // PeerChBodyTopic is used for sharing peer change block bodies + // PeerChBodyTopic is the topic used for sharing peer change block bodies PeerChBodyTopic = "peerChangeBlockBodies" - // MetachainBlocksTopic is used for sharing metachain block headers between shards + // MetachainBlocksTopic is the topic used for sharing metachain block headers MetachainBlocksTopic = "metachainBlocks" - // ShardHeadersForMetachainTopic is used for sharing shards block headers to the metachain nodes - ShardHeadersForMetachainTopic = "shardHeadersForMetachain" + // TrieNodesTopic is used for sharing state trie nodes + TrieNodesTopic = "trieNodes" ) // SystemVirtualMachine is a byte array identifier for the smart contract address created for system VM @@ -25,12 +25,6 @@ var SystemVirtualMachine = []byte{0, 1} // IELEVirtualMachine is a byte array identifier for the smart contract address created for IELE VM var IELEVirtualMachine = []byte{1, 0} -// HeraWABTVirtualMachine is a byte array identifier for the smart contract address created for Hera WABT VM -var HeraWABTVirtualMachine = []byte{2, 0} - -// HeraWAVMVirtualMachine is a byte array identifier for the smart contract address created for Hera WAVM VM -var HeraWAVMVirtualMachine = []byte{3, 0} - // ArwenVirtualMachine is a byte array identifier for the smart contract address created for Arwen VM var ArwenVirtualMachine = []byte{5, 0} diff --git a/process/factory/metachain/interceptorsContainerFactory.go b/process/factory/metachain/interceptorsContainerFactory.go index eff7c54f2cb..674031eb2d1 100644 --- a/process/factory/metachain/interceptorsContainerFactory.go +++ b/process/factory/metachain/interceptorsContainerFactory.go @@ -66,6 +66,7 @@ func NewInterceptorsContainerFactory( blackList 
process.BlackListHandler, headerSigVerifier process.InterceptedHeaderSigVerifier, chainID []byte, + sizeCheckDelta uint32, ) (*interceptorsContainerFactory, error) { if check.IfNil(shardCoordinator) { @@ -77,6 +78,9 @@ func NewInterceptorsContainerFactory( if check.IfNil(store) { return nil, process.ErrNilStore } + if sizeCheckDelta > 0 { + marshalizer = marshal.NewSizeCheckUnmarshalizer(marshalizer, sizeCheckDelta) + } if check.IfNil(marshalizer) { return nil, process.ErrNilMarshalizer } @@ -213,6 +217,16 @@ func (icf *interceptorsContainerFactory) Create() (process.InterceptorsContainer return nil, err } + keys, interceptorSlice, err = icf.generateTrieNodesInterceptors() + if err != nil { + return nil, err + } + + err = container.AddMultiple(keys, interceptorSlice) + if err != nil { + return nil, err + } + return container, nil } @@ -248,10 +262,9 @@ func (icf *interceptorsContainerFactory) generateMetablockInterceptor() ([]strin } argProcessor := &processor.ArgHdrInterceptorProcessor{ - Headers: icf.dataPool.MetaBlocks(), - HeadersNonces: icf.dataPool.HeadersNonces(), - HdrValidator: hdrValidator, - BlackList: icf.blackList, + Headers: icf.dataPool.Headers(), + HdrValidator: hdrValidator, + BlackList: icf.blackList, } hdrProcessor, err := processor.NewHdrInterceptorProcessor(argProcessor) if err != nil { @@ -284,9 +297,9 @@ func (icf *interceptorsContainerFactory) generateShardHeaderInterceptors() ([]st keys := make([]string, noOfShards) interceptorSlice := make([]process.Interceptor, noOfShards) - //wire up to topics: shardHeadersForMetachain_0_META, shardHeadersForMetachain_1_META ... + //wire up to topics: shardBlocks_0_META, shardBlocks_1_META ... for idx := uint32(0); idx < noOfShards; idx++ { - identifierHeader := factory.ShardHeadersForMetachainTopic + shardC.CommunicationIdentifier(idx) + identifierHeader := factory.ShardBlocksTopic + shardC.CommunicationIdentifier(idx) interceptor, err := icf.createOneShardHeaderInterceptor(identifierHeader) if err != nil { return nil, nil, err @@ -313,10 +326,9 @@ func (icf *interceptorsContainerFactory) createOneShardHeaderInterceptor(topic s } argProcessor := &processor.ArgHdrInterceptorProcessor{ - Headers: icf.dataPool.ShardHeaders(), - HeadersNonces: icf.dataPool.HeadersNonces(), - HdrValidator: hdrValidator, - BlackList: icf.blackList, + Headers: icf.dataPool.Headers(), + HdrValidator: hdrValidator, + BlackList: icf.blackList, } hdrProcessor, err := processor.NewHdrInterceptorProcessor(argProcessor) if err != nil { @@ -524,6 +536,43 @@ func (icf *interceptorsContainerFactory) createOneMiniBlocksInterceptor(topic st return icf.createTopicAndAssignHandler(topic, interceptor, true) } +func (icf *interceptorsContainerFactory) generateTrieNodesInterceptors() ([]string, []process.Interceptor, error) { + shardC := icf.shardCoordinator + + identifierTrieNodes := factory.TrieNodesTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + + interceptor, err := icf.createOneTrieNodesInterceptor(identifierTrieNodes) + if err != nil { + return nil, nil, err + } + + return []string{identifierTrieNodes}, []process.Interceptor{interceptor}, nil +} + +func (icf *interceptorsContainerFactory) createOneTrieNodesInterceptor(topic string) (process.Interceptor, error) { + trieNodesProcessor, err := processor.NewTrieNodesInterceptorProcessor(icf.dataPool.TrieNodes()) + if err != nil { + return nil, err + } + + trieNodesFactory, err := interceptorFactory.NewInterceptedTrieNodeDataFactory(icf.argInterceptorFactory) + if err != nil { + return nil, err + 
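For context on the topic identifiers wired up above, a toy sketch of how a base topic plus a communication identifier yields per-shard topic strings. metaCommunicationIdentifier below is a hypothetical stand-in for shardCoordinator.CommunicationIdentifier(sharding.MetachainShardId); the only suffix format actually confirmed by this diff is the "<topic>_<shard>_META" shape from its example comments (shardBlocks_0_META, shardBlocks_1_META).

package main

import "fmt"

// Base topics, mirroring the constants renamed/added in process/factory/factory.go in this diff.
const (
	shardBlocksTopic = "shardBlocks"
	trieNodesTopic   = "trieNodes"
)

// metaCommunicationIdentifier is a hypothetical helper producing the "_<selfShard>_META"
// suffix seen in the diff's example comments; the real helper lives in the sharding package.
func metaCommunicationIdentifier(selfShard uint32) string {
	return fmt.Sprintf("_%d_META", selfShard)
}

func main() {
	for shard := uint32(0); shard < 2; shard++ {
		fmt.Println(shardBlocksTopic + metaCommunicationIdentifier(shard)) // shardBlocks_0_META, shardBlocks_1_META
		fmt.Println(trieNodesTopic + metaCommunicationIdentifier(shard))   // trieNodes_0_META, trieNodes_1_META
	}
}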
} + + interceptor, err := interceptors.NewMultiDataInterceptor( + icf.marshalizer, + trieNodesFactory, + trieNodesProcessor, + icf.globalThrottler, + ) + if err != nil { + return nil, err + } + + return icf.createTopicAndAssignHandler(topic, interceptor, true) +} + // IsInterfaceNil returns true if there is no value under the interface func (icf *interceptorsContainerFactory) IsInterfaceNil() bool { if icf == nil { diff --git a/process/factory/metachain/interceptorsContainerFactory_test.go b/process/factory/metachain/interceptorsContainerFactory_test.go index 2c8543c544d..858a8b4a912 100644 --- a/process/factory/metachain/interceptorsContainerFactory_test.go +++ b/process/factory/metachain/interceptorsContainerFactory_test.go @@ -49,24 +49,21 @@ func createStubTopicHandler(matchStrToErrOnCreate string, matchStrToErrOnRegiste func createDataPools() dataRetriever.MetaPoolsHolder { pools := &mock.MetaPoolsHolderStub{ - ShardHeadersCalled: func() storage.Cacher { - return &mock.CacherStub{} + HeadersCalled: func() dataRetriever.HeadersPool { + return &mock.HeadersCacherStub{} }, MiniBlocksCalled: func() storage.Cacher { return &mock.CacherStub{} }, - MetaBlocksCalled: func() storage.Cacher { - return &mock.CacherStub{} - }, - HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { - return &mock.Uint64SyncMapCacherStub{} - }, TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{} }, UnsignedTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{} }, + TrieNodesCalled: func() storage.Cacher { + return &mock.CacherStub{} + }, } return pools @@ -105,6 +102,7 @@ func TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -134,6 +132,7 @@ func TestNewInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *testing &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -163,6 +162,7 @@ func TestNewInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -192,6 +192,7 @@ func TestNewInterceptorsContainerFactory_NilBlockchainShouldErr(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -221,6 +222,37 @@ func TestNewInterceptorsContainerFactory_NilMarshalizerShouldErr(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewInterceptorsContainerFactory_NilMarshalizerAndSizeCheckShouldErr(t *testing.T) { + t.Parallel() + + icf, err := metachain.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + nil, + &mock.HasherMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + &mock.SignerMock{}, + &mock.SignerMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SingleSignKeyGenMock{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, + &mock.BlackListHandlerStub{}, + &mock.HeaderSigVerifierStub{}, + chainID, + 1, ) assert.Nil(t, icf) @@ -250,6 +282,7 @@ func TestNewInterceptorsContainerFactory_NilHasherShouldErr(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) 
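The new sizeCheckDelta parameter wraps the marshalizer before the nil check, and the added NilMarshalizerAndSizeCheck tests assert that a nil marshalizer still fails with ErrNilMarshalizer even when the size check is enabled. Below is a generic, self-contained sketch of that decorator pattern with made-up types; the nil-preserving constructor is one plausible way the assertion can hold, not a claim about how marshal.NewSizeCheckUnmarshalizer is actually implemented.

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// Marshalizer is a toy interface standing in for the real marshal.Marshalizer.
type Marshalizer interface {
	Marshal(obj interface{}) ([]byte, error)
	Unmarshal(obj interface{}, buff []byte) error
}

type jsonMarshalizer struct{}

func (jsonMarshalizer) Marshal(obj interface{}) ([]byte, error)      { return json.Marshal(obj) }
func (jsonMarshalizer) Unmarshal(obj interface{}, buff []byte) error { return json.Unmarshal(buff, obj) }

// sizeCheckUnmarshalizer decorates another marshalizer; a real implementation could also
// enforce a size tolerance (delta, in percent) on unmarshalled payloads.
type sizeCheckUnmarshalizer struct {
	inner Marshalizer
	delta uint32
}

func newSizeCheckUnmarshalizer(inner Marshalizer, delta uint32) Marshalizer {
	if inner == nil {
		return nil // preserve nil-ness so the caller's nil check still fires
	}
	return &sizeCheckUnmarshalizer{inner: inner, delta: delta}
}

func (s *sizeCheckUnmarshalizer) Marshal(obj interface{}) ([]byte, error) { return s.inner.Marshal(obj) }
func (s *sizeCheckUnmarshalizer) Unmarshal(obj interface{}, buff []byte) error {
	return s.inner.Unmarshal(obj, buff)
}

// newFactory mirrors the wrap-then-validate ordering used by the interceptor container factories.
func newFactory(m Marshalizer, sizeCheckDelta uint32) error {
	if sizeCheckDelta > 0 {
		m = newSizeCheckUnmarshalizer(m, sizeCheckDelta)
	}
	if m == nil {
		return errors.New("nil marshalizer")
	}
	return nil
}

func main() {
	fmt.Println(newFactory(nil, 1))               // nil marshalizer
	fmt.Println(newFactory(jsonMarshalizer{}, 1)) // <nil>
}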
assert.Nil(t, icf) @@ -279,6 +312,7 @@ func TestNewInterceptorsContainerFactory_NilMultiSignerShouldErr(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -308,6 +342,7 @@ func TestNewInterceptorsContainerFactory_NilDataPoolShouldErr(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -337,6 +372,7 @@ func TestNewInterceptorsContainerFactory_NilAccountsShouldErr(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -366,6 +402,7 @@ func TestNewInterceptorsContainerFactory_NilAddrConvShouldErr(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -395,6 +432,7 @@ func TestNewInterceptorsContainerFactory_NilSingleSignerShouldErr(t *testing.T) &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -424,6 +462,7 @@ func TestNewInterceptorsContainerFactory_NilKeyGenShouldErr(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -453,6 +492,7 @@ func TestNewInterceptorsContainerFactory_NilFeeHandlerShouldErr(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -482,6 +522,7 @@ func TestNewInterceptorsContainerFactory_NilBlackListHandlerShouldErr(t *testing nil, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -511,6 +552,7 @@ func TestNewInterceptorsContainerFactory_EmptyCahinIDShouldErr(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, nil, + 0, ) assert.Nil(t, icf) @@ -540,6 +582,37 @@ func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, + ) + + assert.NotNil(t, icf) + assert.Nil(t, err) +} + +func TestNewInterceptorsContainerFactory_ShouldWorkWithSizeCheck(t *testing.T) { + t.Parallel() + + icf, err := metachain.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + &mock.SignerMock{}, + &mock.SignerMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SingleSignKeyGenMock{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, + &mock.BlackListHandlerStub{}, + &mock.HeaderSigVerifierStub{}, + chainID, + 1, ) assert.NotNil(t, icf) @@ -571,6 +644,7 @@ func TestInterceptorsContainerFactory_CreateTopicMetablocksFailsShouldErr(t *tes &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) container, err := icf.Create() @@ -585,7 +659,7 @@ func TestInterceptorsContainerFactory_CreateTopicShardHeadersForMetachainFailsSh icf, _ := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), mock.NewNodesCoordinatorMock(), - createStubTopicHandler(factory.ShardHeadersForMetachainTopic, ""), + createStubTopicHandler(factory.ShardBlocksTopic, ""), createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, @@ -602,6 +676,7 @@ func TestInterceptorsContainerFactory_CreateTopicShardHeadersForMetachainFailsSh &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) container, err := icf.Create() @@ -633,6 +708,7 @@ func 
TestInterceptorsContainerFactory_CreateRegisterForMetablocksFailsShouldErr( &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) container, err := icf.Create() @@ -647,7 +723,39 @@ func TestInterceptorsContainerFactory_CreateRegisterShardHeadersForMetachainFail icf, _ := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), mock.NewNodesCoordinatorMock(), - createStubTopicHandler("", factory.ShardHeadersForMetachainTopic), + createStubTopicHandler("", factory.ShardBlocksTopic), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + &mock.SignerMock{}, + &mock.SignerMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SingleSignKeyGenMock{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, + &mock.BlackListHandlerStub{}, + &mock.HeaderSigVerifierStub{}, + chainID, + 0, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) +} + +func TestInterceptorsContainerFactory_CreateRegisterTrieNodesFailsShouldErr(t *testing.T) { + t.Parallel() + + icf, _ := metachain.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler("", factory.TrieNodesTopic), createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, @@ -664,6 +772,7 @@ func TestInterceptorsContainerFactory_CreateRegisterShardHeadersForMetachainFail &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) container, err := icf.Create() @@ -702,6 +811,7 @@ func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) container, err := icf.Create() @@ -753,6 +863,7 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) container, err := icf.Create() @@ -762,7 +873,8 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { numInterceptorsTransactionsForMetachain := noOfShards + 1 numInterceptorsMiniBlocksForMetachain := noOfShards + 1 numInterceptorsUnsignedTxsForMetachain := noOfShards - totalInterceptors := numInterceptorsMetablock + numInterceptorsShardHeadersForMetachain + + numInterceptorsTrieNodes := 1 + totalInterceptors := numInterceptorsMetablock + numInterceptorsShardHeadersForMetachain + numInterceptorsTrieNodes + numInterceptorsTransactionsForMetachain + numInterceptorsUnsignedTxsForMetachain + numInterceptorsMiniBlocksForMetachain assert.Nil(t, err) diff --git a/process/factory/metachain/intermediateProcessorsContainerFactory.go b/process/factory/metachain/intermediateProcessorsContainerFactory.go index b2933370787..e03f3466001 100644 --- a/process/factory/metachain/intermediateProcessorsContainerFactory.go +++ b/process/factory/metachain/intermediateProcessorsContainerFactory.go @@ -7,7 +7,7 @@ import ( "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/block/preprocess" + "github.com/ElrondNetwork/elrond-go/process/block/postprocess" "github.com/ElrondNetwork/elrond-go/process/factory/containers" "github.com/ElrondNetwork/elrond-go/sharding" ) @@ -78,7 +78,7 @@ func (ppcm *intermediateProcessorsContainerFactory) Create() (process.Intermedia } func (ppcm *intermediateProcessorsContainerFactory) 
createSmartContractResultsIntermediateProcessor() (process.IntermediateTransactionHandler, error) { - irp, err := preprocess.NewIntermediateResultsProcessor( + irp, err := postprocess.NewIntermediateResultsProcessor( ppcm.hasher, ppcm.marshalizer, ppcm.shardCoordinator, diff --git a/process/factory/metachain/preProcessorsContainerFactory.go b/process/factory/metachain/preProcessorsContainerFactory.go index cc17f662a8a..de874a9c1b7 100644 --- a/process/factory/metachain/preProcessorsContainerFactory.go +++ b/process/factory/metachain/preProcessorsContainerFactory.go @@ -137,6 +137,7 @@ func (ppcm *preProcessorsContainerFactory) createTxPreProcessor() (process.PrePr ppcm.economicsFee, ppcm.miniBlocksCompacter, ppcm.gasHandler, + block.TxBlock, ) return txPreprocessor, err diff --git a/process/factory/metachain/preProcessorsContainerFactory_test.go b/process/factory/metachain/preProcessorsContainerFactory_test.go index b9fd68357c6..40baba5d2f1 100644 --- a/process/factory/metachain/preProcessorsContainerFactory_test.go +++ b/process/factory/metachain/preProcessorsContainerFactory_test.go @@ -20,7 +20,7 @@ func TestNewPreProcessorsContainerFactory_NilShardCoordinator(t *testing.T) { &mock.HasherMock{}, mock.NewMetaPoolsHolderFake(), &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.FeeHandlerStub{}, @@ -42,7 +42,7 @@ func TestNewPreProcessorsContainerFactory_NilStore(t *testing.T) { &mock.HasherMock{}, mock.NewMetaPoolsHolderFake(), &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.FeeHandlerStub{}, @@ -64,7 +64,7 @@ func TestNewPreProcessorsContainerFactory_NilMarshalizer(t *testing.T) { &mock.HasherMock{}, mock.NewMetaPoolsHolderFake(), &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.FeeHandlerStub{}, @@ -86,7 +86,7 @@ func TestNewPreProcessorsContainerFactory_NilHasher(t *testing.T) { nil, mock.NewMetaPoolsHolderFake(), &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.FeeHandlerStub{}, @@ -108,7 +108,7 @@ func TestNewPreProcessorsContainerFactory_NilDataPool(t *testing.T) { &mock.HasherMock{}, nil, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.FeeHandlerStub{}, @@ -130,7 +130,7 @@ func TestNewPreProcessorsContainerFactory_NilAccounts(t *testing.T) { &mock.HasherMock{}, mock.NewMetaPoolsHolderFake(), nil, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.FeeHandlerStub{}, @@ -152,7 +152,7 @@ func TestNewPreProcessorsContainerFactory_NilFeeHandler(t *testing.T) { &mock.HasherMock{}, mock.NewMetaPoolsHolderFake(), &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, nil, @@ -174,7 +174,7 @@ func TestNewPreProcessorsContainerFactory_NilTxProcessor(t *testing.T) { &mock.HasherMock{}, mock.NewMetaPoolsHolderFake(), &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, nil, &mock.SmartContractResultsProcessorMock{}, &mock.FeeHandlerStub{}, @@ -217,7 +217,7 @@ func 
TestNewPreProcessorsContainerFactory_NilMiniBlocksCompacter(t *testing.T) { &mock.HasherMock{}, mock.NewMetaPoolsHolderFake(), &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.FeeHandlerStub{}, @@ -238,7 +238,7 @@ func TestNewPreProcessorsContainerFactory_NilGasHandler(t *testing.T) { &mock.HasherMock{}, mock.NewMetaPoolsHolderFake(), &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.FeeHandlerStub{}, @@ -259,7 +259,7 @@ func TestNewPreProcessorsContainerFactory(t *testing.T) { &mock.HasherMock{}, mock.NewMetaPoolsHolderFake(), &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.FeeHandlerStub{}, @@ -286,7 +286,7 @@ func TestPreProcessorsContainerFactory_CreateErrTxPreproc(t *testing.T) { &mock.HasherMock{}, dataPool, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.FeeHandlerStub{}, @@ -312,7 +312,7 @@ func TestPreProcessorsContainerFactory_Create(t *testing.T) { &mock.HasherMock{}, mock.NewMetaPoolsHolderFake(), &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, &mock.FeeHandlerStub{}, diff --git a/process/factory/metachain/vmContainerFactory_test.go b/process/factory/metachain/vmContainerFactory_test.go index e906c093420..040a34c23bf 100644 --- a/process/factory/metachain/vmContainerFactory_test.go +++ b/process/factory/metachain/vmContainerFactory_test.go @@ -57,14 +57,25 @@ func TestVmContainerFactory_Create(t *testing.T) { BurnPercentage: 0.40, }, FeeSettings: config.FeeSettings{ - MaxGasLimitPerBlock: "10000000000", - MinGasPrice: "10", - MinGasLimit: "10", + MaxGasLimitPerBlock: "10000000000", + MinGasPrice: "10", + MinGasLimit: "10", + GasPerDataByte: "1", + DataLimitForBaseCalc: "10000", }, ValidatorSettings: config.ValidatorSettings{ StakeValue: "500", UnBoundPeriod: "1000", }, + RatingSettings: config.RatingSettings{ + StartRating: 5, + MaxRating: 10, + MinRating: 1, + ProposerIncreaseRatingStep: 2, + ProposerDecreaseRatingStep: 4, + ValidatorIncreaseRatingStep: 1, + ValidatorDecreaseRatingStep: 2, + }, }, ) diff --git a/process/factory/shard/interceptorsContainerFactory.go b/process/factory/shard/interceptorsContainerFactory.go index 144432439cb..a6a47c7a6e8 100644 --- a/process/factory/shard/interceptorsContainerFactory.go +++ b/process/factory/shard/interceptorsContainerFactory.go @@ -61,6 +61,7 @@ func NewInterceptorsContainerFactory( blackList process.BlackListHandler, headerSigVerifier process.InterceptedHeaderSigVerifier, chainID []byte, + sizeCheckDelta uint32, ) (*interceptorsContainerFactory, error) { if check.IfNil(accounts) { return nil, process.ErrNilAccountsAdapter @@ -74,6 +75,9 @@ func NewInterceptorsContainerFactory( if check.IfNil(store) { return nil, process.ErrNilBlockChain } + if sizeCheckDelta > 0 { + marshalizer = marshal.NewSizeCheckUnmarshalizer(marshalizer, sizeCheckDelta) + } if check.IfNil(marshalizer) { return nil, process.ErrNilMarshalizer } @@ -224,6 +228,16 @@ func (icf *interceptorsContainerFactory) Create() (process.InterceptorsContainer return nil, err } + keys, interceptorSlice, err = icf.generateTrieNodesInterceptors() + if err 
!= nil { + return nil, err + } + + err = container.AddMultiple(keys, interceptorSlice) + if err != nil { + return nil, err + } + return container, nil } @@ -463,10 +477,9 @@ func (icf *interceptorsContainerFactory) generateHdrInterceptor() ([]string, []p } argProcessor := &processor.ArgHdrInterceptorProcessor{ - Headers: icf.dataPool.Headers(), - HeadersNonces: icf.dataPool.HeadersNonces(), - HdrValidator: hdrValidator, - BlackList: icf.blackList, + Headers: icf.dataPool.Headers(), + HdrValidator: hdrValidator, + BlackList: icf.blackList, } hdrProcessor, err := processor.NewHdrInterceptorProcessor(argProcessor) if err != nil { @@ -483,7 +496,8 @@ func (icf *interceptorsContainerFactory) generateHdrInterceptor() ([]string, []p return nil, nil, err } - identifierHdr := factory.HeadersTopic + shardC.CommunicationIdentifier(shardC.SelfId()) + // compose header shard topic, for example: shardBlocks_0_META + identifierHdr := factory.ShardBlocksTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) _, err = icf.createTopicAndAssignHandler(identifierHdr, interceptor, true) if err != nil { return nil, nil, err @@ -571,10 +585,9 @@ func (icf *interceptorsContainerFactory) generateMetachainHeaderInterceptor() ([ } argProcessor := &processor.ArgHdrInterceptorProcessor{ - Headers: icf.dataPool.MetaBlocks(), - HeadersNonces: icf.dataPool.HeadersNonces(), - HdrValidator: hdrValidator, - BlackList: icf.blackList, + Headers: icf.dataPool.Headers(), + HdrValidator: hdrValidator, + BlackList: icf.blackList, } hdrProcessor, err := processor.NewHdrInterceptorProcessor(argProcessor) if err != nil { @@ -599,6 +612,59 @@ func (icf *interceptorsContainerFactory) generateMetachainHeaderInterceptor() ([ return []string{identifierHdr}, []process.Interceptor{interceptor}, nil } +func (icf *interceptorsContainerFactory) generateTrieNodesInterceptors() ([]string, []process.Interceptor, error) { + shardC := icf.shardCoordinator + + keys := make([]string, 0) + interceptorSlice := make([]process.Interceptor, 0) + + identifierTrieNodes := factory.TrieNodesTopic + shardC.CommunicationIdentifier(shardC.SelfId()) + + interceptor, err := icf.createOneTrieNodesInterceptor(identifierTrieNodes) + if err != nil { + return nil, nil, err + } + + keys = append(keys, identifierTrieNodes) + interceptorSlice = append(interceptorSlice, interceptor) + + identifierTrieNodes = factory.TrieNodesTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + + interceptor, err = icf.createOneTrieNodesInterceptor(identifierTrieNodes) + if err != nil { + return nil, nil, err + } + + keys = append(keys, identifierTrieNodes) + interceptorSlice = append(interceptorSlice, interceptor) + + return keys, interceptorSlice, nil +} + +func (icf *interceptorsContainerFactory) createOneTrieNodesInterceptor(topic string) (process.Interceptor, error) { + trieNodesProcessor, err := processor.NewTrieNodesInterceptorProcessor(icf.dataPool.TrieNodes()) + if err != nil { + return nil, err + } + + trieNodesFactory, err := interceptorFactory.NewInterceptedTrieNodeDataFactory(icf.argInterceptorFactory) + if err != nil { + return nil, err + } + + interceptor, err := interceptors.NewMultiDataInterceptor( + icf.marshalizer, + trieNodesFactory, + trieNodesProcessor, + icf.globalTxThrottler, + ) + if err != nil { + return nil, err + } + + return icf.createTopicAndAssignHandler(topic, interceptor, true) +} + // IsInterfaceNil returns true if there is no value under the interface func (icf *interceptorsContainerFactory) IsInterfaceNil() bool { if icf == nil { 
diff --git a/process/factory/shard/interceptorsContainerFactory_test.go b/process/factory/shard/interceptorsContainerFactory_test.go index 13bab100e0e..e1e9f9d291d 100644 --- a/process/factory/shard/interceptorsContainerFactory_test.go +++ b/process/factory/shard/interceptorsContainerFactory_test.go @@ -50,11 +50,8 @@ func createDataPools() dataRetriever.PoolsHolder { pools.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{} } - pools.HeadersCalled = func() storage.Cacher { - return &mock.CacherStub{} - } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - return &mock.Uint64SyncMapCacherStub{} + pools.HeadersCalled = func() dataRetriever.HeadersPool { + return &mock.HeadersCacherStub{} } pools.MiniBlocksCalled = func() storage.Cacher { return &mock.CacherStub{} @@ -71,6 +68,9 @@ func createDataPools() dataRetriever.PoolsHolder { pools.RewardTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{} } + pools.TrieNodesCalled = func() storage.Cacher { + return &mock.CacherStub{} + } pools.CurrBlockTxsCalled = func() dataRetriever.TransactionCacher { return &mock.TxForCurrentBlockStub{} } @@ -109,6 +109,7 @@ func TestNewInterceptorsContainerFactory_NilAccountsAdapter(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -138,6 +139,7 @@ func TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -167,6 +169,7 @@ func TestNewInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *testing &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -196,6 +199,7 @@ func TestNewInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -225,6 +229,7 @@ func TestNewInterceptorsContainerFactory_NilBlockchainShouldErr(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -254,6 +259,37 @@ func TestNewInterceptorsContainerFactory_NilMarshalizerShouldErr(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewInterceptorsContainerFactory_NilMarshalizerAndSizeCheckShouldErr(t *testing.T) { + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + nil, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, + &mock.BlackListHandlerStub{}, + &mock.HeaderSigVerifierStub{}, + chainID, + 1, ) assert.Nil(t, icf) @@ -283,6 +319,7 @@ func TestNewInterceptorsContainerFactory_NilHasherShouldErr(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -312,6 +349,7 @@ func TestNewInterceptorsContainerFactory_NilKeyGenShouldErr(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -341,6 +379,7 @@ func 
TestNewInterceptorsContainerFactory_NilSingleSignerShouldErr(t *testing.T) &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -370,6 +409,7 @@ func TestNewInterceptorsContainerFactory_NilMultiSignerShouldErr(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -399,6 +439,7 @@ func TestNewInterceptorsContainerFactory_NilDataPoolShouldErr(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -428,6 +469,7 @@ func TestNewInterceptorsContainerFactory_NilAddrConverterShouldErr(t *testing.T) &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -457,6 +499,7 @@ func TestNewInterceptorsContainerFactory_NilTxFeeHandlerShouldErr(t *testing.T) &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -486,6 +529,7 @@ func TestNewInterceptorsContainerFactory_NilBlackListHandlerShouldErr(t *testing nil, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) assert.Nil(t, icf) @@ -515,6 +559,7 @@ func TestNewInterceptorsContainerFactory_EmptyChainIDShouldErr(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, nil, + 0, ) assert.Nil(t, icf) @@ -544,6 +589,37 @@ func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, + ) + + assert.NotNil(t, icf) + assert.Nil(t, err) +} + +func TestNewInterceptorsContainerFactory_ShouldWorkWithSizeCheck(t *testing.T) { + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, + &mock.BlackListHandlerStub{}, + &mock.HeaderSigVerifierStub{}, + chainID, + 1, ) assert.NotNil(t, icf) @@ -575,6 +651,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationTxFailsShouldErr(t *tes &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) container, err := icf.Create() @@ -590,7 +667,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationHdrFailsShouldErr(t *te &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), mock.NewNodesCoordinatorMock(), - createStubTopicHandler(factory.HeadersTopic, ""), + createStubTopicHandler(factory.ShardBlocksTopic, ""), createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, @@ -606,6 +683,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationHdrFailsShouldErr(t *te &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) container, err := icf.Create() @@ -637,6 +715,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMiniBlocksFailsShouldEr &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) container, err := icf.Create() @@ -668,6 +747,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMetachainHeadersFailsSh &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) container, err := icf.Create() @@ -699,6 +779,7 @@ func TestInterceptorsContainerFactory_CreateRegisterTxFailsShouldErr(t *testing. 
&mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) container, err := icf.Create() @@ -714,7 +795,7 @@ func TestInterceptorsContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), mock.NewNodesCoordinatorMock(), - createStubTopicHandler("", factory.HeadersTopic), + createStubTopicHandler("", factory.ShardBlocksTopic), createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, @@ -730,6 +811,7 @@ func TestInterceptorsContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) container, err := icf.Create() @@ -761,6 +843,7 @@ func TestInterceptorsContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t * &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) container, err := icf.Create() @@ -792,6 +875,39 @@ func TestInterceptorsContainerFactory_CreateRegisterMetachainHeadersShouldErr(t &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, + ) + + container, err := icf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) +} + +func TestInterceptorsContainerFactory_CreateRegisterTrieNodesShouldErr(t *testing.T) { + t.Parallel() + + icf, _ := shard.NewInterceptorsContainerFactory( + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + createStubTopicHandler("", factory.TrieNodesTopic), + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, + &mock.BlackListHandlerStub{}, + &mock.HeaderSigVerifierStub{}, + chainID, + 0, ) container, err := icf.Create() @@ -830,6 +946,7 @@ func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) container, err := icf.Create() @@ -881,6 +998,7 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { &mock.BlackListHandlerStub{}, &mock.HeaderSigVerifierStub{}, chainID, + 0, ) container, err := icf.Create() @@ -891,8 +1009,9 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { numInterceptorHeaders := 1 numInterceptorMiniBlocks := noOfShards + 1 numInterceptorMetachainHeaders := 1 + numInterceptorTrieNodes := 2 totalInterceptors := numInterceptorTxs + numInterceptorsUnsignedTxs + numInterceptorsRewardTxs + - numInterceptorHeaders + numInterceptorMiniBlocks + numInterceptorMetachainHeaders + numInterceptorHeaders + numInterceptorMiniBlocks + numInterceptorMetachainHeaders + numInterceptorTrieNodes assert.Nil(t, err) assert.Equal(t, totalInterceptors, container.Len()) diff --git a/process/factory/shard/intermediateProcessorsContainerFactory.go b/process/factory/shard/intermediateProcessorsContainerFactory.go index ff807a960e7..f7edb2ff355 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory.go @@ -7,6 +7,7 @@ import ( "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block/postprocess" "github.com/ElrondNetwork/elrond-go/process/block/preprocess" "github.com/ElrondNetwork/elrond-go/process/economics" 
"github.com/ElrondNetwork/elrond-go/process/factory/containers" @@ -94,11 +95,31 @@ func (ppcm *intermediateProcessorsContainerFactory) Create() (process.Intermedia return nil, err } + interproc, err = ppcm.createReceiptIntermediateProcessor() + if err != nil { + return nil, err + } + + err = container.Add(block.ReceiptBlock, interproc) + if err != nil { + return nil, err + } + + interproc, err = ppcm.createBadTransactionsIntermediateProcessor() + if err != nil { + return nil, err + } + + err = container.Add(block.InvalidBlock, interproc) + if err != nil { + return nil, err + } + return container, nil } func (ppcm *intermediateProcessorsContainerFactory) createSmartContractResultsIntermediateProcessor() (process.IntermediateTransactionHandler, error) { - irp, err := preprocess.NewIntermediateResultsProcessor( + irp, err := postprocess.NewIntermediateResultsProcessor( ppcm.hasher, ppcm.marshalizer, ppcm.shardCoordinator, @@ -111,6 +132,32 @@ func (ppcm *intermediateProcessorsContainerFactory) createSmartContractResultsIn return irp, err } +func (ppcm *intermediateProcessorsContainerFactory) createReceiptIntermediateProcessor() (process.IntermediateTransactionHandler, error) { + irp, err := postprocess.NewOneMiniBlockPostProcessor( + ppcm.hasher, + ppcm.marshalizer, + ppcm.shardCoordinator, + ppcm.store, + block.ReceiptBlock, + dataRetriever.UnsignedTransactionUnit, + ) + + return irp, err +} + +func (ppcm *intermediateProcessorsContainerFactory) createBadTransactionsIntermediateProcessor() (process.IntermediateTransactionHandler, error) { + irp, err := postprocess.NewOneMiniBlockPostProcessor( + ppcm.hasher, + ppcm.marshalizer, + ppcm.shardCoordinator, + ppcm.store, + block.InvalidBlock, + dataRetriever.TransactionUnit, + ) + + return irp, err +} + func (ppcm *intermediateProcessorsContainerFactory) createRewardsTxIntermediateProcessor() (process.IntermediateTransactionHandler, error) { irp, err := preprocess.NewRewardTxHandler( ppcm.specialAddressHandler, diff --git a/process/factory/shard/intermediateProcessorsContainerFactory_test.go b/process/factory/shard/intermediateProcessorsContainerFactory_test.go index 96177a6202f..f8d5410961e 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory_test.go @@ -144,5 +144,5 @@ func TestIntermediateProcessorsContainerFactory_Create(t *testing.T) { container, err := ipcf.Create() assert.Nil(t, err) - assert.Equal(t, 2, container.Len()) + assert.Equal(t, 4, container.Len()) } diff --git a/process/factory/shard/preProcessorsContainerFactory.go b/process/factory/shard/preProcessorsContainerFactory.go index 0d6fb7265ac..31fe49596fb 100644 --- a/process/factory/shard/preProcessorsContainerFactory.go +++ b/process/factory/shard/preProcessorsContainerFactory.go @@ -125,7 +125,7 @@ func NewPreProcessorsContainerFactory( func (ppcm *preProcessorsContainerFactory) Create() (process.PreProcessorsContainer, error) { container := containers.NewPreProcessorsContainer() - preproc, err := ppcm.createTxPreProcessor() + preproc, err := ppcm.createTxPreProcessor(block.TxBlock) if err != nil { return nil, err } @@ -135,6 +135,16 @@ func (ppcm *preProcessorsContainerFactory) Create() (process.PreProcessorsContai return nil, err } + preproc, err = ppcm.createTxPreProcessor(block.InvalidBlock) + if err != nil { + return nil, err + } + + err = container.Add(block.InvalidBlock, preproc) + if err != nil { + return nil, err + } + preproc, err = 
ppcm.createSmartContractResultPreProcessor() if err != nil { return nil, err @@ -158,7 +168,7 @@ func (ppcm *preProcessorsContainerFactory) Create() (process.PreProcessorsContai return container, nil } -func (ppcm *preProcessorsContainerFactory) createTxPreProcessor() (process.PreProcessor, error) { +func (ppcm *preProcessorsContainerFactory) createTxPreProcessor(blockType block.Type) (process.PreProcessor, error) { txPreprocessor, err := preprocess.NewTransactionPreprocessor( ppcm.dataPool.Transactions(), ppcm.store, @@ -171,6 +181,7 @@ func (ppcm *preProcessorsContainerFactory) createTxPreProcessor() (process.PrePr ppcm.economicsFee, ppcm.miniBlocksCompacter, ppcm.gasHandler, + blockType, ) return txPreprocessor, err diff --git a/process/factory/shard/preProcessorsContainerFactory_test.go b/process/factory/shard/preProcessorsContainerFactory_test.go index be2fb4ee06e..d60bb2239b5 100644 --- a/process/factory/shard/preProcessorsContainerFactory_test.go +++ b/process/factory/shard/preProcessorsContainerFactory_test.go @@ -20,7 +20,7 @@ func TestNewPreProcessorsContainerFactory_NilShardCoordinator(t *testing.T) { mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -46,7 +46,7 @@ func TestNewPreProcessorsContainerFactory_NilStore(t *testing.T) { mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -72,7 +72,7 @@ func TestNewPreProcessorsContainerFactory_NilMarshalizer(t *testing.T) { mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -98,7 +98,7 @@ func TestNewPreProcessorsContainerFactory_NilHasher(t *testing.T) { mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -124,7 +124,7 @@ func TestNewPreProcessorsContainerFactory_NilDataPool(t *testing.T) { nil, &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -150,7 +150,7 @@ func TestNewPreProcessorsContainerFactory_NilAddrConv(t *testing.T) { mock.NewPoolsHolderMock(), nil, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -176,7 +176,7 @@ func TestNewPreProcessorsContainerFactory_NilAccounts(t *testing.T) { mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, nil, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -202,7 +202,7 @@ func TestNewPreProcessorsContainerFactory_NilTxProcessor(t *testing.T) { mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, nil, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -228,7 +228,7 @@ func 
TestNewPreProcessorsContainerFactory_NilSCProcessor(t *testing.T) { mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, nil, &mock.SmartContractResultsProcessorMock{}, @@ -254,7 +254,7 @@ func TestNewPreProcessorsContainerFactory_NilSCR(t *testing.T) { mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, nil, @@ -280,7 +280,7 @@ func TestNewPreProcessorsContainerFactory_NilRewardTxProcessor(t *testing.T) { mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -332,7 +332,7 @@ func TestNewPreProcessorsContainerFactory_NilInternalTransactionProducer(t *test mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -358,7 +358,7 @@ func TestNewPreProcessorsContainerFactory_NilFeeHandler(t *testing.T) { mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -384,7 +384,7 @@ func TestNewPreProcessorsContainerFactory_NilMiniBlocksCompacter(t *testing.T) { mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -410,7 +410,7 @@ func TestNewPreProcessorsContainerFactory_NilGasHandler(t *testing.T) { mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -436,7 +436,7 @@ func TestNewPreProcessorsContainerFactory(t *testing.T) { mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -466,7 +466,7 @@ func TestPreProcessorsContainerFactory_CreateErrTxPreproc(t *testing.T) { dataPool, &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -506,7 +506,7 @@ func TestPreProcessorsContainerFactory_CreateErrScrPreproc(t *testing.T) { dataPool, &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -555,7 +555,7 @@ func TestPreProcessorsContainerFactory_Create(t *testing.T) { dataPool, &mock.AddressConverterMock{}, &mock.AccountsStub{}, - &mock.RequestHandlerMock{}, + &mock.RequestHandlerStub{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, @@ -571,5 +571,5 @@ func TestPreProcessorsContainerFactory_Create(t *testing.T) { container, err := ppcm.Create() 
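The shard container factories in this diff gain handlers keyed by block type: a transaction preprocessor parametrized with block.InvalidBlock here, plus ReceiptBlock and InvalidBlock post-processors in the intermediate-processors factory, which is why the expected container lengths in the corresponding tests grow to 4. A minimal sketch of such a block-type keyed registry, using toy types rather than the elrond-go containers package:

package main

import (
	"errors"
	"fmt"
)

// BlockType plays the role of block.Type in this diff.
type BlockType int

const (
	TxBlock BlockType = iota
	InvalidBlock
	SmartContractResultBlock
	RewardsBlock
)

// PreProcessor is a placeholder for the real process.PreProcessor interface.
type PreProcessor interface{ Name() string }

type namedPreProcessor struct{ name string }

func (n namedPreProcessor) Name() string { return n.name }

// container is a toy map-backed registry keyed by block type, with Add/Len like the real container.
type container struct{ items map[BlockType]PreProcessor }

func newContainer() *container { return &container{items: map[BlockType]PreProcessor{}} }

func (c *container) Add(key BlockType, p PreProcessor) error {
	if _, exists := c.items[key]; exists {
		return errors.New("key already present in container")
	}
	c.items[key] = p
	return nil
}

func (c *container) Len() int { return len(c.items) }

func main() {
	c := newContainer()
	// The same transaction preprocessor constructor is reused for normal and invalid
	// transactions, parametrized by block type, as in createTxPreProcessor(blockType) above.
	_ = c.Add(TxBlock, namedPreProcessor{"txs"})
	_ = c.Add(InvalidBlock, namedPreProcessor{"invalid txs"})
	_ = c.Add(SmartContractResultBlock, namedPreProcessor{"scrs"})
	_ = c.Add(RewardsBlock, namedPreProcessor{"rewards"})
	fmt.Println(c.Len()) // 4, matching the updated expectation in the tests
}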
assert.Nil(t, err) - assert.Equal(t, 3, container.Len()) + assert.Equal(t, 4, container.Len()) } diff --git a/process/headerCheck/errors.go b/process/headerCheck/errors.go new file mode 100644 index 00000000000..906113e8590 --- /dev/null +++ b/process/headerCheck/errors.go @@ -0,0 +1,10 @@ +package headerCheck + +import "errors" + +// ErrNotEnoughSignatures signals that a block is not signed by at least the minimum number of validators from +// the consensus group +var ErrNotEnoughSignatures = errors.New("not enough signatures in block") + +// ErrWrongSizeBitmap signals that the provided bitmap's length is bigger than the one that was required +var ErrWrongSizeBitmap = errors.New("wrong size bitmap has been provided") diff --git a/process/headerCheck/headerSignatureVerify.go b/process/headerCheck/headerSignatureVerify.go index 9c381586a7b..d485cf43051 100644 --- a/process/headerCheck/headerSignatureVerify.go +++ b/process/headerCheck/headerSignatureVerify.go @@ -1,6 +1,8 @@ package headerCheck import ( + "math/bits" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/crypto" @@ -79,7 +81,6 @@ func checkArgsHeaderSigVerifier(arguments *ArgsHeaderSigVerifier) error { // VerifySignature will check if signature is correct func (hsv *HeaderSigVerifier) VerifySignature(header data.HeaderHandler) error { - randSeed := header.GetPrevRandSeed() bitmap := header.GetPubKeysBitmap() if len(bitmap) == 0 { @@ -98,6 +99,11 @@ func (hsv *HeaderSigVerifier) VerifySignature(header data.HeaderHandler) error { return err } + err = hsv.verifyConsensusSize(consensusPubKeys, header) + if err != nil { + return err + } + verifier, err := hsv.multiSigVerifier.Create(consensusPubKeys, 0) if err != nil { return err @@ -120,6 +126,38 @@ func (hsv *HeaderSigVerifier) VerifySignature(header data.HeaderHandler) error { return verifier.Verify(hash, bitmap) } +func (hsv *HeaderSigVerifier) verifyConsensusSize(consensusPubKeys []string, header data.HeaderHandler) error { + consensusSize := len(consensusPubKeys) + bitmap := header.GetPubKeysBitmap() + + expectedBitmapSize := consensusSize / 8 + if consensusSize%8 != 0 { + expectedBitmapSize++ + } + if len(bitmap) != expectedBitmapSize { + log.Debug("wrong size bitmap", + "expected number of bytes", expectedBitmapSize, + "actual", len(bitmap)) + return ErrWrongSizeBitmap + } + + numOfOnesInBitmap := 0 + for index := range bitmap { + numOfOnesInBitmap += bits.OnesCount8(bitmap[index]) + } + + minNumRequiredSignatures := consensusSize*2/3 + 1 + if numOfOnesInBitmap >= minNumRequiredSignatures { + return nil + } + + log.Debug("not enough signatures", + "minimum expected", minNumRequiredSignatures, + "actual", numOfOnesInBitmap) + + return ErrNotEnoughSignatures +} + // VerifyRandSeed will check if rand seed is correct func (hsv *HeaderSigVerifier) VerifyRandSeed(header data.HeaderHandler) error { leaderPubKey, err := hsv.getLeader(header) diff --git a/process/headerCheck/headerSignatureVerify_test.go b/process/headerCheck/headerSignatureVerify_test.go new file mode 100644 index 00000000000..6f236bd7401 --- /dev/null +++ b/process/headerCheck/headerSignatureVerify_test.go @@ -0,0 +1,417 @@ +package headerCheck + +import ( + "bytes" + "errors" + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/crypto" + dataBlock "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + 
"github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/require" +) + +func createHeaderSigVerifierArgs() *ArgsHeaderSigVerifier { + return &ArgsHeaderSigVerifier{ + Marshalizer: &mock.MarshalizerMock{}, + Hasher: &mock.HasherMock{}, + NodesCoordinator: &mock.NodesCoordinatorMock{}, + MultiSigVerifier: mock.NewMultiSigner(), + SingleSigVerifier: &mock.SignerMock{}, + KeyGen: &mock.SingleSignKeyGenMock{}, + } +} + +func TestNewHeaderSigVerifier_NilArgumentsShouldErr(t *testing.T) { + t.Parallel() + + hdrSigVerifier, err := NewHeaderSigVerifier(nil) + + require.Nil(t, hdrSigVerifier) + require.Equal(t, process.ErrNilArgumentStruct, err) +} + +func TestNewHeaderSigVerifier_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + args := createHeaderSigVerifierArgs() + args.Hasher = nil + hdrSigVerifier, err := NewHeaderSigVerifier(args) + + require.Nil(t, hdrSigVerifier) + require.Equal(t, process.ErrNilHasher, err) +} + +func TestNewHeaderSigVerifier_NilKeyGenShouldErr(t *testing.T) { + t.Parallel() + + args := createHeaderSigVerifierArgs() + args.KeyGen = nil + hdrSigVerifier, err := NewHeaderSigVerifier(args) + + require.Nil(t, hdrSigVerifier) + require.Equal(t, process.ErrNilKeyGen, err) +} + +func TestNewHeaderSigVerifier_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + args := createHeaderSigVerifierArgs() + args.Marshalizer = nil + hdrSigVerifier, err := NewHeaderSigVerifier(args) + + require.Nil(t, hdrSigVerifier) + require.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewHeaderSigVerifier_NilMultiSigShouldErr(t *testing.T) { + t.Parallel() + + args := createHeaderSigVerifierArgs() + args.MultiSigVerifier = nil + hdrSigVerifier, err := NewHeaderSigVerifier(args) + + require.Nil(t, hdrSigVerifier) + require.Equal(t, process.ErrNilMultiSigVerifier, err) +} + +func TestNewHeaderSigVerifier_NilNodesCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + args := createHeaderSigVerifierArgs() + args.NodesCoordinator = nil + hdrSigVerifier, err := NewHeaderSigVerifier(args) + + require.Nil(t, hdrSigVerifier) + require.Equal(t, process.ErrNilNodesCoordinator, err) +} + +func TestNewHeaderSigVerifier_NilSingleSigShouldErr(t *testing.T) { + t.Parallel() + + args := createHeaderSigVerifierArgs() + args.SingleSigVerifier = nil + hdrSigVerifier, err := NewHeaderSigVerifier(args) + + require.Nil(t, hdrSigVerifier) + require.Equal(t, process.ErrNilSingleSigner, err) +} + +func TestHeaderSigVerifier_VerifySignatureNilPrevRandSeedShouldErr(t *testing.T) { + t.Parallel() + + args := createHeaderSigVerifierArgs() + hdrSigVerifier, _ := NewHeaderSigVerifier(args) + header := &dataBlock.Header{} + + err := hdrSigVerifier.VerifyRandSeed(header) + require.Equal(t, sharding.ErrNilRandomness, err) +} + +func TestHeaderSigVerifier_VerifyRandSeedOk(t *testing.T) { + t.Parallel() + + args := createHeaderSigVerifierArgs() + wasCalled := false + + args.KeyGen = &mock.SingleSignKeyGenMock{ + PublicKeyFromByteArrayCalled: func(b []byte) (key crypto.PublicKey, err error) { + return &mock.SingleSignPublicKey{}, nil + }, + } + args.SingleSigVerifier = &mock.SignerMock{ + VerifyStub: func(public crypto.PublicKey, msg []byte, sig []byte) error { + wasCalled = true + return nil + }, + } + + pkAddr := []byte("aaa00000000000000000000000000000") + nodesCoordinator := &mock.NodesCoordinatorMock{ + ComputeValidatorsGroupCalled: func(randomness []byte, round uint64, shardId uint32) (validators []sharding.Validator, err error) { + v, _ := sharding.NewValidator(big.NewInt(0), 1, pkAddr, 
pkAddr) + return []sharding.Validator{v}, nil + }, + } + args.NodesCoordinator = nodesCoordinator + hdrSigVerifier, _ := NewHeaderSigVerifier(args) + header := &dataBlock.Header{} + + err := hdrSigVerifier.VerifyRandSeed(header) + require.Nil(t, err) + require.True(t, wasCalled) +} + +func TestHeaderSigVerifier_VerifyRandSeedShouldErrWhenVerificationFails(t *testing.T) { + t.Parallel() + + args := createHeaderSigVerifierArgs() + wasCalled := false + localError := errors.New("err") + + args.KeyGen = &mock.SingleSignKeyGenMock{ + PublicKeyFromByteArrayCalled: func(b []byte) (key crypto.PublicKey, err error) { + return &mock.SingleSignPublicKey{}, nil + }, + } + args.SingleSigVerifier = &mock.SignerMock{ + VerifyStub: func(public crypto.PublicKey, msg []byte, sig []byte) error { + wasCalled = true + return localError + }, + } + + pkAddr := []byte("aaa00000000000000000000000000000") + nodesCoordinator := &mock.NodesCoordinatorMock{ + ComputeValidatorsGroupCalled: func(randomness []byte, round uint64, shardId uint32) (validators []sharding.Validator, err error) { + v, _ := sharding.NewValidator(big.NewInt(0), 1, pkAddr, pkAddr) + return []sharding.Validator{v}, nil + }, + } + args.NodesCoordinator = nodesCoordinator + hdrSigVerifier, _ := NewHeaderSigVerifier(args) + header := &dataBlock.Header{} + + err := hdrSigVerifier.VerifyRandSeed(header) + require.Equal(t, localError, err) + require.True(t, wasCalled) +} + +func TestHeaderSigVerifier_VerifyRandSeedAndLeaderSignatureNilRandomnessShouldErr(t *testing.T) { + t.Parallel() + + args := createHeaderSigVerifierArgs() + hdrSigVerifier, _ := NewHeaderSigVerifier(args) + header := &dataBlock.Header{} + + err := hdrSigVerifier.VerifyRandSeedAndLeaderSignature(header) + require.Equal(t, sharding.ErrNilRandomness, err) +} + +func TestHeaderSigVerifier_VerifyRandSeedAndLeaderSignatureVerifyShouldErrWhenValidationFails(t *testing.T) { + t.Parallel() + + args := createHeaderSigVerifierArgs() + count := 0 + localErr := errors.New("err") + + args.KeyGen = &mock.SingleSignKeyGenMock{ + PublicKeyFromByteArrayCalled: func(b []byte) (key crypto.PublicKey, err error) { + return &mock.SingleSignPublicKey{}, nil + }, + } + args.SingleSigVerifier = &mock.SignerMock{ + VerifyStub: func(public crypto.PublicKey, msg []byte, sig []byte) error { + count++ + return localErr + }, + } + + pkAddr := []byte("aaa00000000000000000000000000000") + nodesCoordinator := &mock.NodesCoordinatorMock{ + ComputeValidatorsGroupCalled: func(randomness []byte, round uint64, shardId uint32) (validators []sharding.Validator, err error) { + v, _ := sharding.NewValidator(big.NewInt(0), 1, pkAddr, pkAddr) + return []sharding.Validator{v}, nil + }, + } + args.NodesCoordinator = nodesCoordinator + hdrSigVerifier, _ := NewHeaderSigVerifier(args) + header := &dataBlock.Header{} + + err := hdrSigVerifier.VerifyRandSeedAndLeaderSignature(header) + require.Equal(t, localErr, err) + require.Equal(t, 1, count) +} + +func TestHeaderSigVerifier_VerifyRandSeedAndLeaderSignatureVerifyLeaderSigShouldErr(t *testing.T) { + t.Parallel() + + args := createHeaderSigVerifierArgs() + count := 0 + localErr := errors.New("err") + leaderSig := []byte("signature") + + args.KeyGen = &mock.SingleSignKeyGenMock{ + PublicKeyFromByteArrayCalled: func(b []byte) (key crypto.PublicKey, err error) { + return &mock.SingleSignPublicKey{}, nil + }, + } + args.SingleSigVerifier = &mock.SignerMock{ + VerifyStub: func(public crypto.PublicKey, msg []byte, sig []byte) error { + count++ + if bytes.Equal(sig, leaderSig) { + return 
localErr + } + return nil + }, + } + + pkAddr := []byte("aaa00000000000000000000000000000") + nodesCoordinator := &mock.NodesCoordinatorMock{ + ComputeValidatorsGroupCalled: func(randomness []byte, round uint64, shardId uint32) (validators []sharding.Validator, err error) { + v, _ := sharding.NewValidator(big.NewInt(0), 1, pkAddr, pkAddr) + return []sharding.Validator{v}, nil + }, + } + args.NodesCoordinator = nodesCoordinator + hdrSigVerifier, _ := NewHeaderSigVerifier(args) + header := &dataBlock.Header{ + LeaderSignature: leaderSig, + } + + err := hdrSigVerifier.VerifyRandSeedAndLeaderSignature(header) + require.Equal(t, localErr, err) + require.Equal(t, 2, count) +} + +func TestHeaderSigVerifier_VerifyRandSeedAndLeaderSignature(t *testing.T) { + t.Parallel() + + args := createHeaderSigVerifierArgs() + count := 0 + + args.KeyGen = &mock.SingleSignKeyGenMock{ + PublicKeyFromByteArrayCalled: func(b []byte) (key crypto.PublicKey, err error) { + return &mock.SingleSignPublicKey{}, nil + }, + } + args.SingleSigVerifier = &mock.SignerMock{ + VerifyStub: func(public crypto.PublicKey, msg []byte, sig []byte) error { + count++ + return nil + }, + } + + pkAddr := []byte("aaa00000000000000000000000000000") + nodesCoordinator := &mock.NodesCoordinatorMock{ + ComputeValidatorsGroupCalled: func(randomness []byte, round uint64, shardId uint32) (validators []sharding.Validator, err error) { + v, _ := sharding.NewValidator(big.NewInt(0), 1, pkAddr, pkAddr) + return []sharding.Validator{v}, nil + }, + } + args.NodesCoordinator = nodesCoordinator + hdrSigVerifier, _ := NewHeaderSigVerifier(args) + header := &dataBlock.Header{} + + err := hdrSigVerifier.VerifyRandSeedAndLeaderSignature(header) + require.Nil(t, err) + require.Equal(t, 2, count) +} + +func TestHeaderSigVerifier_VerifySignatureNilBitmapShouldErr(t *testing.T) { + t.Parallel() + + args := createHeaderSigVerifierArgs() + hdrSigVerifier, _ := NewHeaderSigVerifier(args) + header := &dataBlock.Header{} + + err := hdrSigVerifier.VerifySignature(header) + require.Equal(t, process.ErrNilPubKeysBitmap, err) +} + +func TestHeaderSigVerifier_VerifySignatureBlockProposerSigMissingShouldErr(t *testing.T) { + t.Parallel() + + args := createHeaderSigVerifierArgs() + hdrSigVerifier, _ := NewHeaderSigVerifier(args) + header := &dataBlock.Header{ + PubKeysBitmap: []byte("0"), + } + + err := hdrSigVerifier.VerifySignature(header) + require.Equal(t, process.ErrBlockProposerSignatureMissing, err) +} + +func TestHeaderSigVerifier_VerifySignatureNilRandomnessShouldErr(t *testing.T) { + t.Parallel() + + args := createHeaderSigVerifierArgs() + hdrSigVerifier, _ := NewHeaderSigVerifier(args) + header := &dataBlock.Header{ + PubKeysBitmap: []byte("1"), + } + + err := hdrSigVerifier.VerifySignature(header) + require.Equal(t, sharding.ErrNilRandomness, err) +} + +func TestHeaderSigVerifier_VerifySignatureWrongSizeBitmapShouldErr(t *testing.T) { + t.Parallel() + + args := createHeaderSigVerifierArgs() + pkAddr := []byte("aaa00000000000000000000000000000") + nodesCoordinator := &mock.NodesCoordinatorMock{ + ComputeValidatorsGroupCalled: func(randomness []byte, round uint64, shardId uint32) (validators []sharding.Validator, err error) { + v, _ := sharding.NewValidator(big.NewInt(0), 1, pkAddr, pkAddr) + return []sharding.Validator{v}, nil + }, + } + args.NodesCoordinator = nodesCoordinator + + hdrSigVerifier, _ := NewHeaderSigVerifier(args) + header := &dataBlock.Header{ + PubKeysBitmap: []byte("11"), + } + + err := hdrSigVerifier.VerifySignature(header) + require.Equal(t, 
ErrWrongSizeBitmap, err) +} + +func TestHeaderSigVerifier_VerifySignatureNotEnoughSigsShouldErr(t *testing.T) { + t.Parallel() + + args := createHeaderSigVerifierArgs() + pkAddr := []byte("aaa00000000000000000000000000000") + nodesCoordinator := &mock.NodesCoordinatorMock{ + ComputeValidatorsGroupCalled: func(randomness []byte, round uint64, shardId uint32) (validators []sharding.Validator, err error) { + v, _ := sharding.NewValidator(big.NewInt(0), 1, pkAddr, pkAddr) + return []sharding.Validator{v, v, v, v, v}, nil + }, + } + args.NodesCoordinator = nodesCoordinator + + hdrSigVerifier, _ := NewHeaderSigVerifier(args) + header := &dataBlock.Header{ + PubKeysBitmap: []byte("A"), + } + + err := hdrSigVerifier.VerifySignature(header) + require.Equal(t, ErrNotEnoughSignatures, err) +} + +func TestHeaderSigVerifier_VerifySignatureOk(t *testing.T) { + t.Parallel() + + wasCalled := false + args := createHeaderSigVerifierArgs() + pkAddr := []byte("aaa00000000000000000000000000000") + nodesCoordinator := &mock.NodesCoordinatorMock{ + ComputeValidatorsGroupCalled: func(randomness []byte, round uint64, shardId uint32) (validators []sharding.Validator, err error) { + v, _ := sharding.NewValidator(big.NewInt(0), 1, pkAddr, pkAddr) + return []sharding.Validator{v}, nil + }, + } + args.NodesCoordinator = nodesCoordinator + + args.MultiSigVerifier = &mock.BelNevMock{ + CreateMock: func(pubKeys []string, index uint16) (signer crypto.MultiSigner, err error) { + return &mock.BelNevMock{ + VerifyMock: func(msg []byte, bitmap []byte) error { + wasCalled = true + return nil + }}, nil + }, + } + + hdrSigVerifier, _ := NewHeaderSigVerifier(args) + header := &dataBlock.Header{ + PubKeysBitmap: []byte("1"), + } + + err := hdrSigVerifier.VerifySignature(header) + require.Nil(t, err) + require.True(t, wasCalled) +} diff --git a/process/interceptors/factory/interceptedTrieNodeDataFactory.go b/process/interceptors/factory/interceptedTrieNodeDataFactory.go new file mode 100644 index 00000000000..0c16c5ebe4c --- /dev/null +++ b/process/interceptors/factory/interceptedTrieNodeDataFactory.go @@ -0,0 +1,49 @@ +package factory + +import ( + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data/trie" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" +) + +type interceptedTrieNodeDataFactory struct { + marshalizer marshal.Marshalizer + hasher hashing.Hasher +} + +// NewInterceptedTrieNodeDataFactory creates an instance of interceptedTrieNodeDataFactory +func NewInterceptedTrieNodeDataFactory( + argument *ArgInterceptedDataFactory, +) (*interceptedTrieNodeDataFactory, error) { + + if argument == nil { + return nil, process.ErrNilArgumentStruct + } + if check.IfNil(argument.Marshalizer) { + return nil, process.ErrNilMarshalizer + } + if check.IfNil(argument.Hasher) { + return nil, process.ErrNilHasher + } + + return &interceptedTrieNodeDataFactory{ + marshalizer: argument.Marshalizer, + hasher: argument.Hasher, + }, nil +} + +// Create creates instances of InterceptedData by unmarshalling provided buffer +func (sidf *interceptedTrieNodeDataFactory) Create(buff []byte) (process.InterceptedData, error) { + return trie.NewInterceptedTrieNode(buff, sidf.marshalizer, sidf.hasher) + +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sidf *interceptedTrieNodeDataFactory) IsInterfaceNil() bool { + if sidf == nil { + return true + } + return false +} diff --git 
a/process/interceptors/factory/interceptedTrieNodeDataFactory_test.go b/process/interceptors/factory/interceptedTrieNodeDataFactory_test.go new file mode 100644 index 00000000000..2c2eab70fb0 --- /dev/null +++ b/process/interceptors/factory/interceptedTrieNodeDataFactory_test.go @@ -0,0 +1,47 @@ +package factory + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/process" + "github.com/stretchr/testify/assert" +) + +func TestNewInterceptedTrieNodeDataFactory_NilArgumentsShouldErr(t *testing.T) { + t.Parallel() + + itn, err := NewInterceptedTrieNodeDataFactory(nil) + + assert.Nil(t, itn) + assert.Equal(t, process.ErrNilArgumentStruct, err) +} + +func TestNewInterceptedTrieNodeDataFactory_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockArgument() + arg.Marshalizer = nil + + itn, err := NewInterceptedTrieNodeDataFactory(arg) + assert.Nil(t, itn) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewInterceptedTrieNodeDataFactory_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockArgument() + arg.Hasher = nil + + itn, err := NewInterceptedTrieNodeDataFactory(arg) + assert.Nil(t, itn) + assert.Equal(t, process.ErrNilHasher, err) +} + +func TestNewInterceptedTrieNodeDataFactory_OkValsShouldWork(t *testing.T) { + t.Parallel() + + itn, err := NewInterceptedTrieNodeDataFactory(createMockArgument()) + assert.NotNil(t, itn) + assert.Nil(t, err) +} diff --git a/process/interceptors/multiDataInterceptor.go b/process/interceptors/multiDataInterceptor.go index 4eac885eccd..f0f1a1ec4b1 100644 --- a/process/interceptors/multiDataInterceptor.go +++ b/process/interceptors/multiDataInterceptor.go @@ -71,28 +71,26 @@ func (mdi *MultiDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, } filteredMultiDataBuff := make([][]byte, 0, len(multiDataBuff)) + interceptedMultiData := make([]process.InterceptedData, 0) lastErrEncountered := error(nil) wgProcess := &sync.WaitGroup{} wgProcess.Add(len(multiDataBuff)) + go func() { wgProcess.Wait() + mdi.processor.SignalEndOfProcessing(interceptedMultiData) mdi.throttler.EndProcessing() }() for _, dataBuff := range multiDataBuff { - interceptedData, err := mdi.factory.Create(dataBuff) + interceptedData, err := mdi.interceptedData(dataBuff) if err != nil { lastErrEncountered = err wgProcess.Done() continue } - err = interceptedData.CheckValidity() - if err != nil { - lastErrEncountered = err - wgProcess.Done() - continue - } + interceptedMultiData = append(interceptedMultiData, interceptedData) //data is validated, add it to filtered out buff filteredMultiDataBuff = append(filteredMultiDataBuff, dataBuff) @@ -121,6 +119,20 @@ func (mdi *MultiDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, return lastErrEncountered } +func (mdi *MultiDataInterceptor) interceptedData(dataBuff []byte) (process.InterceptedData, error) { + interceptedData, err := mdi.factory.Create(dataBuff) + if err != nil { + return nil, err + } + + err = interceptedData.CheckValidity() + if err != nil { + return nil, err + } + + return interceptedData, nil +} + // IsInterfaceNil returns true if there is no value under the interface func (mdi *MultiDataInterceptor) IsInterfaceNil() bool { if mdi == nil { diff --git a/process/interceptors/processor/argHdrInterceptorProcessor.go b/process/interceptors/processor/argHdrInterceptorProcessor.go index 065cde24eb8..f57d1098cbc 100644 --- a/process/interceptors/processor/argHdrInterceptorProcessor.go +++ b/process/interceptors/processor/argHdrInterceptorProcessor.go @@ 
-3,13 +3,11 @@ package processor import ( "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/storage" ) // ArgHdrInterceptorProcessor is the argument for the interceptor processor used for headers (shard, meta and so on) type ArgHdrInterceptorProcessor struct { - Headers storage.Cacher - HeadersNonces dataRetriever.Uint64SyncMapCacher - HdrValidator process.HeaderValidator - BlackList process.BlackListHandler + Headers dataRetriever.HeadersPool + HdrValidator process.HeaderValidator + BlackList process.BlackListHandler } diff --git a/process/interceptors/processor/hdrInterceptorProcessor.go b/process/interceptors/processor/hdrInterceptorProcessor.go index 2dc3f7e6c71..119f7b22981 100644 --- a/process/interceptors/processor/hdrInterceptorProcessor.go +++ b/process/interceptors/processor/hdrInterceptorProcessor.go @@ -3,18 +3,15 @@ package processor import ( "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/storage" ) // HdrInterceptorProcessor is the processor used when intercepting headers // (shard headers, meta headers) structs which satisfy HeaderHandler interface. type HdrInterceptorProcessor struct { - headers storage.Cacher - headersNonces dataRetriever.Uint64SyncMapCacher - hdrValidator process.HeaderValidator - blackList process.BlackListHandler + headers dataRetriever.HeadersPool + hdrValidator process.HeaderValidator + blackList process.BlackListHandler } // NewHdrInterceptorProcessor creates a new TxInterceptorProcessor instance @@ -25,9 +22,6 @@ func NewHdrInterceptorProcessor(argument *ArgHdrInterceptorProcessor) (*HdrInter if check.IfNil(argument.Headers) { return nil, process.ErrNilCacher } - if check.IfNil(argument.HeadersNonces) { - return nil, process.ErrNilUint64SyncMapCacher - } if check.IfNil(argument.HdrValidator) { return nil, process.ErrNilHdrValidator } @@ -36,10 +30,9 @@ func NewHdrInterceptorProcessor(argument *ArgHdrInterceptorProcessor) (*HdrInter } return &HdrInterceptorProcessor{ - headers: argument.Headers, - headersNonces: argument.HeadersNonces, - hdrValidator: argument.HdrValidator, - blackList: argument.BlackList, + headers: argument.Headers, + hdrValidator: argument.HdrValidator, + blackList: argument.BlackList, }, nil } @@ -67,15 +60,15 @@ func (hip *HdrInterceptorProcessor) Save(data process.InterceptedData) error { return process.ErrWrongTypeAssertion } - hip.headers.HasOrAdd(interceptedHdr.Hash(), interceptedHdr.HeaderHandler()) - - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(interceptedHdr.HeaderHandler().GetShardID(), interceptedHdr.Hash()) - hip.headersNonces.Merge(interceptedHdr.HeaderHandler().GetNonce(), syncMap) + hip.headers.AddHeader(interceptedHdr.Hash(), interceptedHdr.HeaderHandler()) return nil } +// SignalEndOfProcessing signals the end of processing +func (hip *HdrInterceptorProcessor) SignalEndOfProcessing(data []process.InterceptedData) { +} + // IsInterfaceNil returns true if there is no value under the interface func (hip *HdrInterceptorProcessor) IsInterfaceNil() bool { if hip == nil { diff --git a/process/interceptors/processor/hdrInterceptorProcessor_test.go b/process/interceptors/processor/hdrInterceptorProcessor_test.go index 286cb945872..a1d4796b650 100644 --- a/process/interceptors/processor/hdrInterceptorProcessor_test.go +++ 
b/process/interceptors/processor/hdrInterceptorProcessor_test.go @@ -6,7 +6,6 @@ import ( "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/interceptors/processor" "github.com/ElrondNetwork/elrond-go/process/mock" @@ -15,10 +14,9 @@ import ( func createMockHdrArgument() *processor.ArgHdrInterceptorProcessor { arg := &processor.ArgHdrInterceptorProcessor{ - Headers: &mock.CacherStub{}, - HeadersNonces: &mock.Uint64SyncMapCacherStub{}, - HdrValidator: &mock.HeaderValidatorStub{}, - BlackList: &mock.BlackListHandlerStub{}, + Headers: &mock.HeadersCacherStub{}, + HdrValidator: &mock.HeaderValidatorStub{}, + BlackList: &mock.BlackListHandlerStub{}, } return arg @@ -46,17 +44,6 @@ func TestNewHdrInterceptorProcessor_NilHeadersShouldErr(t *testing.T) { assert.Equal(t, process.ErrNilCacher, err) } -func TestNewHdrInterceptorProcessor_NilHeadersNoncesShouldErr(t *testing.T) { - t.Parallel() - - arg := createMockHdrArgument() - arg.HeadersNonces = nil - hip, err := processor.NewHdrInterceptorProcessor(arg) - - assert.Nil(t, hip) - assert.Equal(t, process.ErrNilUint64SyncMapCacher, err) -} - func TestNewHdrInterceptorProcessor_NilValidatorShouldErr(t *testing.T) { t.Parallel() @@ -192,19 +179,11 @@ func TestHdrInterceptorProcessor_SaveShouldWork(t *testing.T) { } wasAddedHeaders := false - wasMergedHeadersNonces := false arg := createMockHdrArgument() - arg.Headers = &mock.CacherStub{ - HasOrAddCalled: func(key []byte, value interface{}) (ok, evicted bool) { + arg.Headers = &mock.HeadersCacherStub{ + AddCalled: func(headerHash []byte, header data.HeaderHandler) { wasAddedHeaders = true - - return true, true - }, - } - arg.HeadersNonces = &mock.Uint64SyncMapCacherStub{ - MergeCalled: func(nonce uint64, src dataRetriever.ShardIdHashMap) { - wasMergedHeadersNonces = true }, } @@ -213,7 +192,7 @@ func TestHdrInterceptorProcessor_SaveShouldWork(t *testing.T) { err := hip.Save(hdrInterceptedData) assert.Nil(t, err) - assert.True(t, wasAddedHeaders && wasMergedHeadersNonces) + assert.True(t, wasAddedHeaders) } //------- IsInterfaceNil diff --git a/process/interceptors/processor/trieNodeInterceptorProcessor.go b/process/interceptors/processor/trieNodeInterceptorProcessor.go new file mode 100644 index 00000000000..4b9202e457f --- /dev/null +++ b/process/interceptors/processor/trieNodeInterceptorProcessor.go @@ -0,0 +1,62 @@ +package processor + +import ( + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data/trie" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/storage" +) + +// TrieNodeInterceptorProcessor is the processor used when intercepting trie nodes +type TrieNodeInterceptorProcessor struct { + interceptedNodes storage.Cacher +} + +// NewTrieNodesInterceptorProcessor creates a new instance of TrieNodeInterceptorProcessor +func NewTrieNodesInterceptorProcessor(interceptedNodes storage.Cacher) (*TrieNodeInterceptorProcessor, error) { + if check.IfNil(interceptedNodes) { + return nil, process.ErrNilCacher + } + + return &TrieNodeInterceptorProcessor{ + interceptedNodes: interceptedNodes, + }, nil +} + +// Validate checks if the intercepted data can be processed +func (tnip *TrieNodeInterceptorProcessor) Validate(data process.InterceptedData) error { + return nil +} + +// Save saves the intercepted trie node in the intercepted nodes cacher +func (tnip 
*TrieNodeInterceptorProcessor) Save(data process.InterceptedData) error { + nodeData, ok := data.(*trie.InterceptedTrieNode) + if !ok { + return process.ErrWrongTypeAssertion + } + + tnip.interceptedNodes.Put(nodeData.Hash(), nodeData) + return nil +} + +// SignalEndOfProcessing signals the end of processing +func (tnip *TrieNodeInterceptorProcessor) SignalEndOfProcessing(data []process.InterceptedData) { + nodeData, ok := data[0].(*trie.InterceptedTrieNode) + if !ok { + log.Debug("intercepted data is not a trie node") + return + } + + // TODO instead of using a node to trigger the end of processing, use a dedicated channel + // between interceptor and sync + nodeData.CreateEndOfProcessingTriggerNode() + err := tnip.Save(nodeData) + if err != nil { + log.Debug(err.Error()) + } +} + +// IsInterfaceNil returns true if there is no value under the interface +func (tnip *TrieNodeInterceptorProcessor) IsInterfaceNil() bool { + return tnip == nil +} diff --git a/process/interceptors/processor/trieNodeInterceptorProcessor_test.go b/process/interceptors/processor/trieNodeInterceptorProcessor_test.go new file mode 100644 index 00000000000..34337096d7e --- /dev/null +++ b/process/interceptors/processor/trieNodeInterceptorProcessor_test.go @@ -0,0 +1,75 @@ +package processor_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data/trie" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/interceptors/processor" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" +) + +func TestNewTrieNodesInterceptorProcessor_NilCacherShouldErr(t *testing.T) { + t.Parallel() + + tnip, err := processor.NewTrieNodesInterceptorProcessor(nil) + assert.Nil(t, tnip) + assert.Equal(t, process.ErrNilCacher, err) +} + +func TestNewTrieNodesInterceptorProcessor_OkValsShouldWork(t *testing.T) { + t.Parallel() + + tnip, err := processor.NewTrieNodesInterceptorProcessor(&mock.CacherMock{}) + assert.Nil(t, err) + assert.NotNil(t, tnip) +} + +//------- Validate + +func TestTrieNodesInterceptorProcessor_ValidateShouldWork(t *testing.T) { + t.Parallel() + + tnip, _ := processor.NewTrieNodesInterceptorProcessor(&mock.CacherMock{}) + + assert.Nil(t, tnip.Validate(nil)) +} + +//------- Save + +func TestTrieNodesInterceptorProcessor_SaveWrongTypeAssertion(t *testing.T) { + t.Parallel() + + tnip, _ := processor.NewTrieNodesInterceptorProcessor(&mock.CacherMock{}) + + err := tnip.Save(nil) + assert.Equal(t, process.ErrWrongTypeAssertion, err) +} + +func TestTrieNodesInterceptorProcessor_SaveShouldPutInCacher(t *testing.T) { + t.Parallel() + + putCalled := false + cacher := &mock.CacherStub{ + PutCalled: func(key []byte, value interface{}) (evicted bool) { + putCalled = true + return false + }, + } + tnip, _ := processor.NewTrieNodesInterceptorProcessor(cacher) + + err := tnip.Save(&trie.InterceptedTrieNode{}) + assert.Nil(t, err) + assert.True(t, putCalled) +} + +//------- IsInterfaceNil + +func TestTrieNodesInterceptorProcessor_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var tnip *processor.TrieNodeInterceptorProcessor + assert.True(t, check.IfNil(tnip)) +} diff --git a/process/interceptors/processor/txBodyInterceptorProcessor.go b/process/interceptors/processor/txBodyInterceptorProcessor.go index b6fcf03bf3c..83a3a6a585f 100644 --- a/process/interceptors/processor/txBodyInterceptorProcessor.go +++ b/process/interceptors/processor/txBodyInterceptorProcessor.go @@ -105,6 +105,10 @@ func 
(tbip *TxBodyInterceptorProcessor) checkMiniblock(miniblock *block.MiniBloc return nil } +// SignalEndOfProcessing signals the end of processing +func (tbip *TxBodyInterceptorProcessor) SignalEndOfProcessing(data []process.InterceptedData) { +} + // IsInterfaceNil returns true if there is no value under the interface func (tbip *TxBodyInterceptorProcessor) IsInterfaceNil() bool { if tbip == nil { diff --git a/process/interceptors/processor/txInterceptorProcessor.go b/process/interceptors/processor/txInterceptorProcessor.go index 8058a083599..8a0094cdac6 100644 --- a/process/interceptors/processor/txInterceptorProcessor.go +++ b/process/interceptors/processor/txInterceptorProcessor.go @@ -63,6 +63,10 @@ func (txip *TxInterceptorProcessor) Save(data process.InterceptedData) error { return nil } +// SignalEndOfProcessing signals the end of processing +func (txip *TxInterceptorProcessor) SignalEndOfProcessing(data []process.InterceptedData) { +} + // IsInterfaceNil returns true if there is no value under the interface func (txip *TxInterceptorProcessor) IsInterfaceNil() bool { if txip == nil { diff --git a/process/interface.go b/process/interface.go index 1e1b68f916b..e7f6b6a5b62 100644 --- a/process/interface.go +++ b/process/interface.go @@ -13,6 +13,8 @@ import ( "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" + "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-vm-common" ) @@ -91,6 +93,7 @@ type InterceptedData interface { type InterceptorProcessor interface { Validate(data InterceptedData) error Save(data InterceptedData) error + SignalEndOfProcessing(data []InterceptedData) IsInterfaceNil() bool } @@ -122,7 +125,8 @@ type TransactionCoordinator interface { GetAllCurrentUsedTxs(blockType block.Type) map[string]data.TransactionHandler - VerifyCreatedBlockTransactions(body block.Body) error + CreateReceiptsHash() ([]byte, error) + VerifyCreatedBlockTransactions(hdr data.HeaderHandler, body block.Body) error IsInterfaceNil() bool } @@ -142,6 +146,7 @@ type IntermediateTransactionHandler interface { SaveCurrentIntermediateTxToStorage() error GetAllCurrentFinishedTxs() map[string]data.TransactionHandler CreateBlockStarted() + GetCreatedInShardMiniBlock() *block.MiniBlock IsInterfaceNil() bool } @@ -196,7 +201,6 @@ type PreProcessor interface { RequestTransactionsForMiniBlock(miniBlock *block.MiniBlock) int ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool) error - CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool) (*block.MiniBlock, error) CreateAndProcessMiniBlocks(maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, haveTime func() bool) (block.MiniBlockSlice, error) GetAllCurrentUsedTxs() map[string]data.TransactionHandler @@ -212,14 +216,11 @@ type BlockProcessor interface { CreateNewHeader() data.HeaderHandler CreateBlockBody(initialHdrData data.HeaderHandler, haveTime func() bool) (data.BodyHandler, error) RestoreBlockIntoPools(header data.HeaderHandler, body data.BodyHandler) error - ApplyBodyToHeader(hdr data.HeaderHandler, body data.BodyHandler) error - ApplyProcessedMiniBlocks(processedMiniBlocks map[string]map[string]struct{}) + ApplyBodyToHeader(hdr data.HeaderHandler, body data.BodyHandler) (data.BodyHandler, error) + ApplyProcessedMiniBlocks(processedMiniBlocks 
*processedMb.ProcessedMiniBlockTracker) MarshalizedDataToBroadcast(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) DecodeBlockBody(dta []byte) data.BodyHandler DecodeBlockHeader(dta []byte) data.HeaderHandler - AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) - RestoreLastNotarizedHrdsToGenesis() - SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) SetNumProcessedObj(numObj uint64) IsInterfaceNil() bool } @@ -235,6 +236,33 @@ type ValidatorStatisticsProcessor interface { RootHash() ([]byte, error) } +// Checker provides functionality to checks the integrity and validity of a data structure +type Checker interface { + // IntegrityAndValidity does both validity and integrity checks on the data structure + IntegrityAndValidity(coordinator sharding.Coordinator) error + // Integrity checks only the integrity of the data + Integrity(coordinator sharding.Coordinator) error + // IsInterfaceNil returns true if there is no value under the interface + IsInterfaceNil() bool +} + +// HeaderConstructionValidator provides functionality to verify header construction +type HeaderConstructionValidator interface { + IsHeaderConstructionValid(currHdr, prevHdr data.HeaderHandler) error + IsInterfaceNil() bool +} + +// SigVerifier provides functionality to verify a signature of a signed data structure that holds also the verifying parameters +type SigVerifier interface { + VerifySig() error +} + +// SignedDataValidator provides functionality to check the validity and signature of a data structure +type SignedDataValidator interface { + SigVerifier + Checker +} + // HashAccesser interface provides functionality over hashable objects type HashAccesser interface { SetHash([]byte) @@ -255,14 +283,15 @@ type Bootstrapper interface { // ForkDetector is an interface that defines the behaviour of a struct that is able // to detect forks type ForkDetector interface { - AddHeader(header data.HeaderHandler, headerHash []byte, state BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error - RemoveHeaders(nonce uint64, hash []byte) + AddHeader(header data.HeaderHandler, headerHash []byte, state BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error + RemoveHeader(nonce uint64, hash []byte) CheckFork() *ForkInfo GetHighestFinalBlockNonce() uint64 + GetHighestFinalBlockHash() []byte ProbableHighestNonce() uint64 - ResetProbableHighestNonce() ResetFork() - RestoreFinalCheckPointToGenesis() + SetRollBackNonce(nonce uint64) + RestoreToGenesis() GetNotarizedHeaderHash(nonce uint64) []byte IsInterfaceNil() bool } @@ -339,6 +368,36 @@ type VirtualMachinesContainerFactory interface { IsInterfaceNil() bool } +// EpochStartTriggerHandler defines that actions which are needed by processor for start of epoch +type EpochStartTriggerHandler interface { + Update(round uint64) + ReceivedHeader(header data.HeaderHandler) + IsEpochStart() bool + Epoch() uint32 + EpochStartRound() uint64 + SetProcessed(header data.HeaderHandler) + Revert() + EpochStartMetaHdrHash() []byte + IsInterfaceNil() bool + SetFinalityAttestingRound(round uint64) + EpochFinalityAttestingRound() uint64 +} + +// EpochBootstrapper defines the actions needed by bootstrapper +type EpochBootstrapper interface { + SetCurrentEpochStartRound(round uint64) + IsInterfaceNil() bool +} + +// PendingMiniBlocksHandler is an interface to keep unfinalized miniblocks +type 
PendingMiniBlocksHandler interface { + PendingMiniBlockHeaders(lastNotarizedHeaders []data.HeaderHandler) ([]block.ShardMiniBlockHeader, error) + AddProcessedHeader(handler data.HeaderHandler) error + RevertHeader(handler data.HeaderHandler) error + IsInterfaceNil() bool +} + +// BlockChainHookHandler defines the actions which should be performed by implementation type BlockChainHookHandler interface { TemporaryAccountsHandler SetCurrentHeader(hdr data.HeaderHandler) @@ -367,12 +426,15 @@ type DataPacker interface { // RequestHandler defines the methods through which request to data can be made type RequestHandler interface { - RequestHeaderByNonce(shardId uint32, nonce uint64) + RequestShardHeader(shardId uint32, hash []byte) + RequestMetaHeader(hash []byte) + RequestMetaHeaderByNonce(nonce uint64) + RequestShardHeaderByNonce(shardId uint32, nonce uint64) RequestTransaction(shardId uint32, txHashes [][]byte) RequestUnsignedTransactions(destShardID uint32, scrHashes [][]byte) RequestRewardTransactions(destShardID uint32, txHashes [][]byte) RequestMiniBlock(shardId uint32, miniblockHash []byte) - RequestHeader(shardId uint32, hash []byte) + RequestTrieNodes(shardId uint32, hash []byte) IsInterfaceNil() bool } @@ -444,7 +506,8 @@ type FeeHandler interface { type TransactionWithFeeHandler interface { GetGasLimit() uint64 GetGasPrice() uint64 - GetData() string + GetData() []byte + GetRecvAddress() []byte } // EconomicsAddressesHandler will return information about economics addresses @@ -539,3 +602,27 @@ type InterceptedHeaderSigVerifier interface { VerifySignature(header data.HeaderHandler) error IsInterfaceNil() bool } + +// BlockTracker defines the functionality for node to track the blocks which are received from network +type BlockTracker interface { + AddCrossNotarizedHeader(shradID uint32, crossNotarizedHeader data.HeaderHandler, crossNotarizedHeaderHash []byte) + AddSelfNotarizedHeader(shardID uint32, selfNotarizedHeader data.HeaderHandler, selfNotarizedHeaderHash []byte) + AddTrackedHeader(header data.HeaderHandler, hash []byte) + CleanupHeadersBehindNonce(shardID uint32, selfNotarizedNonce uint64, crossNotarizedNonce uint64) + ComputeLongestChain(shardID uint32, header data.HeaderHandler) ([]data.HeaderHandler, [][]byte) + ComputeLongestMetaChainFromLastNotarized() ([]data.HeaderHandler, [][]byte, error) + ComputeLongestShardsChainsFromLastNotarized() ([]data.HeaderHandler, [][]byte, map[uint32][]data.HeaderHandler, error) + DisplayTrackedHeaders() + GetCrossNotarizedHeader(shardID uint32, offset uint64) (data.HeaderHandler, []byte, error) + GetLastCrossNotarizedHeader(shardID uint32) (data.HeaderHandler, []byte, error) + GetLastCrossNotarizedHeadersForAllShards() (map[uint32]data.HeaderHandler, error) + GetTrackedHeaders(shardID uint32) ([]data.HeaderHandler, [][]byte) + GetTrackedHeadersForAllShards() map[uint32][]data.HeaderHandler + GetTrackedHeadersWithNonce(shardID uint32, nonce uint64) ([]data.HeaderHandler, [][]byte) + IsShardStuck(shardID uint32) bool + RegisterCrossNotarizedHeadersHandler(func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte)) + RegisterSelfNotarizedHeadersHandler(func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte)) + RemoveLastNotarizedHeaders() + RestoreToGenesis() + IsInterfaceNil() bool +} diff --git a/process/mock/accountWrapperFake.go b/process/mock/accountWrapperFake.go index 96e5675f024..5b7bd93d0df 100644 --- a/process/mock/accountWrapperFake.go +++ b/process/mock/accountWrapperFake.go @@ -25,7 +25,7 @@ 
func NewAccountWrapMock(adr state.AddressContainer, tracker state.AccountTracker return &AccountWrapMock{ address: adr, tracker: tracker, - trackableDataTrie: state.NewTrackableDataTrie(nil), + trackableDataTrie: state.NewTrackableDataTrie([]byte("identifier"), nil), } } diff --git a/process/mock/accountsStub.go b/process/mock/accountsStub.go index 6f6afe8b4e0..dadb9550a16 100644 --- a/process/mock/accountsStub.go +++ b/process/mock/accountsStub.go @@ -21,6 +21,11 @@ type AccountsStub struct { SaveDataTrieCalled func(acountWrapper state.AccountHandler) error RootHashCalled func() ([]byte, error) RecreateTrieCalled func(rootHash []byte) error + PruneTrieCalled func(rootHash []byte) error + SnapshotStateCalled func(rootHash []byte) + SetStateCheckpointCalled func(rootHash []byte) + CancelPruneCalled func(rootHash []byte) + IsPruningEnabledCalled func() bool } var errNotImplemented = errors.New("not implemented") @@ -139,10 +144,41 @@ func (aam *AccountsStub) RecreateTrie(rootHash []byte) error { return errNotImplemented } -// IsInterfaceNil returns true if there is no value under the interface -func (aam *AccountsStub) IsInterfaceNil() bool { - if aam == nil { - return true +func (aam *AccountsStub) PruneTrie(rootHash []byte) error { + if aam.PruneTrieCalled != nil { + return aam.PruneTrieCalled(rootHash) } + + return errNotImplemented +} + +func (aam *AccountsStub) CancelPrune(rootHash []byte) { + if aam.CancelPruneCalled != nil { + aam.CancelPruneCalled(rootHash) + } +} + +func (aam *AccountsStub) SnapshotState(rootHash []byte) { + if aam.SnapshotStateCalled != nil { + aam.SnapshotStateCalled(rootHash) + } +} + +func (aam *AccountsStub) SetStateCheckpoint(rootHash []byte) { + if aam.SetStateCheckpointCalled != nil { + aam.SetStateCheckpointCalled(rootHash) + } +} + +func (aam *AccountsStub) IsPruningEnabled() bool { + if aam.IsPruningEnabledCalled != nil { + return aam.IsPruningEnabledCalled() + } + return false } + +// IsInterfaceNil returns true if there is no value under the interface +func (aam *AccountsStub) IsInterfaceNil() bool { + return aam == nil +} diff --git a/process/mock/blockProcessorMock.go b/process/mock/blockProcessorMock.go index 99af831cbd5..ed34ea0c842 100644 --- a/process/mock/blockProcessorMock.go +++ b/process/mock/blockProcessorMock.go @@ -5,6 +5,7 @@ import ( "time" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" ) type BlockProcessorMock struct { @@ -16,7 +17,7 @@ type BlockProcessorMock struct { RestoreBlockIntoPoolsCalled func(header data.HeaderHandler, body data.BodyHandler) error noShards uint32 SetOnRequestTransactionCalled func(f func(destShardID uint32, txHash []byte)) - ApplyBodyToHeaderCalled func(header data.HeaderHandler, body data.BodyHandler) error + ApplyBodyToHeaderCalled func(header data.HeaderHandler, body data.BodyHandler) (data.BodyHandler, error) MarshalizedDataToBroadcastCalled func(header data.HeaderHandler, body data.BodyHandler) (map[uint32][]byte, map[string][][]byte, error) DecodeBlockBodyCalled func(dta []byte) data.BodyHandler DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler @@ -25,7 +26,7 @@ type BlockProcessorMock struct { RevertStateToBlockCalled func(header data.HeaderHandler) error } -func (bpm *BlockProcessorMock) ApplyProcessedMiniBlocks(miniBlocks map[string]map[string]struct{}) { +func (bpm *BlockProcessorMock) ApplyProcessedMiniBlocks(*processedMb.ProcessedMiniBlockTracker) { } @@ -65,7 +66,7 @@ func (bpm *BlockProcessorMock) RestoreBlockIntoPools(header
data.HeaderHandler, return bpm.RestoreBlockIntoPoolsCalled(header, body) } -func (bpm *BlockProcessorMock) ApplyBodyToHeader(header data.HeaderHandler, body data.BodyHandler) error { +func (bpm *BlockProcessorMock) ApplyBodyToHeader(header data.HeaderHandler, body data.BodyHandler) (data.BodyHandler, error) { return bpm.ApplyBodyToHeaderCalled(header, body) } diff --git a/process/mock/blockTrackerMock.go b/process/mock/blockTrackerMock.go new file mode 100644 index 00000000000..e276117bb80 --- /dev/null +++ b/process/mock/blockTrackerMock.go @@ -0,0 +1,326 @@ +package mock + +import ( + "bytes" + "sort" + "sync" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type headerInfo struct { + hash []byte + header data.HeaderHandler +} + +type BlockTrackerMock struct { + AddTrackedHeaderCalled func(header data.HeaderHandler, hash []byte) + AddCrossNotarizedHeaderCalled func(shardID uint32, crossNotarizedHeader data.HeaderHandler, crossNotarizedHeaderHash []byte) + AddSelfNotarizedHeaderCalled func(shardID uint32, selfNotarizedHeader data.HeaderHandler, selfNotarizedHeaderHash []byte) + CleanupHeadersBehindNonceCalled func(shardID uint32, selfNotarizedNonce uint64, crossNotarizedNonce uint64) + ComputeLongestChainCalled func(shardID uint32, header data.HeaderHandler) ([]data.HeaderHandler, [][]byte) + ComputeLongestMetaChainFromLastNotarizedCalled func() ([]data.HeaderHandler, [][]byte, error) + ComputeLongestShardsChainsFromLastNotarizedCalled func() ([]data.HeaderHandler, [][]byte, map[uint32][]data.HeaderHandler, error) + DisplayTrackedHeadersCalled func() + GetCrossNotarizedHeaderCalled func(shardID uint32, offset uint64) (data.HeaderHandler, []byte, error) + GetLastCrossNotarizedHeaderCalled func(shardID uint32) (data.HeaderHandler, []byte, error) + GetLastCrossNotarizedHeadersForAllShardsCalled func() (map[uint32]data.HeaderHandler, error) + GetTrackedHeadersCalled func(shardID uint32) ([]data.HeaderHandler, [][]byte) + GetTrackedHeadersForAllShardsCalled func() map[uint32][]data.HeaderHandler + GetTrackedHeadersWithNonceCalled func(shardID uint32, nonce uint64) ([]data.HeaderHandler, [][]byte) + IsShardStuckCalled func(shardId uint32) bool + RegisterCrossNotarizedHeadersHandlerCalled func(handler func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte)) + RegisterSelfNotarizedHeadersHandlerCalled func(handler func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte)) + RemoveLastNotarizedHeadersCalled func() + RestoreToGenesisCalled func() + + shardCoordinator sharding.Coordinator + mutCrossNotarizedHeaders sync.RWMutex + crossNotarizedHeaders map[uint32][]*headerInfo + + mutHeaders sync.RWMutex + headers map[uint32][]*headerInfo +} + +func NewBlockTrackerMock(shardCoordinator sharding.Coordinator, startHeaders map[uint32]data.HeaderHandler) *BlockTrackerMock { + bts := BlockTrackerMock{ + shardCoordinator: shardCoordinator, + } + bts.headers = make(map[uint32][]*headerInfo) + _ = bts.InitCrossNotarizedHeaders(startHeaders) + return &bts +} + +func (btm *BlockTrackerMock) AddTrackedHeader(header data.HeaderHandler, hash []byte) { + if btm.AddTrackedHeaderCalled != nil { + btm.AddTrackedHeaderCalled(header, hash) + } + + if check.IfNil(header) { + return + } + + shardID := header.GetShardID() + + btm.mutHeaders.Lock() + defer btm.mutHeaders.Unlock() + + headersForShard, ok := btm.headers[shardID] + if !ok { + 
headersForShard = make([]*headerInfo, 0) + } + + for _, headerInfo := range headersForShard { + if bytes.Equal(headerInfo.hash, hash) { + return + } + } + + headersForShard = append(headersForShard, &headerInfo{hash: hash, header: header}) + btm.headers[shardID] = headersForShard +} + +func (btm *BlockTrackerMock) InitCrossNotarizedHeaders(startHeaders map[uint32]data.HeaderHandler) error { + btm.mutCrossNotarizedHeaders.Lock() + defer btm.mutCrossNotarizedHeaders.Unlock() + + if startHeaders == nil { + return process.ErrNotarizedHeadersSliceIsNil + } + + btm.crossNotarizedHeaders = make(map[uint32][]*headerInfo) + + for _, startHeader := range startHeaders { + shardID := startHeader.GetShardID() + btm.crossNotarizedHeaders[shardID] = append(btm.crossNotarizedHeaders[shardID], &headerInfo{header: startHeader, hash: nil}) + } + + return nil +} + +func (btm *BlockTrackerMock) AddCrossNotarizedHeader(shardID uint32, crossNotarizedHeader data.HeaderHandler, crossNotarizedHeaderHash []byte) { + if btm.AddCrossNotarizedHeaderCalled != nil { + btm.AddCrossNotarizedHeaderCalled(shardID, crossNotarizedHeader, crossNotarizedHeaderHash) + return + } + + if check.IfNil(crossNotarizedHeader) { + return + } + + btm.mutCrossNotarizedHeaders.Lock() + btm.crossNotarizedHeaders[shardID] = append(btm.crossNotarizedHeaders[shardID], &headerInfo{header: crossNotarizedHeader, hash: crossNotarizedHeaderHash}) + if len(btm.crossNotarizedHeaders[shardID]) > 1 { + sort.Slice(btm.crossNotarizedHeaders[shardID], func(i, j int) bool { + return btm.crossNotarizedHeaders[shardID][i].header.GetNonce() < btm.crossNotarizedHeaders[shardID][j].header.GetNonce() + }) + } + btm.mutCrossNotarizedHeaders.Unlock() +} + +func (btm *BlockTrackerMock) AddSelfNotarizedHeader(shardID uint32, selfNotarizedHeader data.HeaderHandler, selfNotarizedHeaderHash []byte) { + if btm.AddSelfNotarizedHeaderCalled != nil { + btm.AddSelfNotarizedHeaderCalled(shardID, selfNotarizedHeader, selfNotarizedHeaderHash) + } +} + +func (btm *BlockTrackerMock) CleanupHeadersBehindNonce(shardID uint32, selfNotarizedNonce uint64, crossNotarizedNonce uint64) { + if btm.CleanupHeadersBehindNonceCalled != nil { + btm.CleanupHeadersBehindNonceCalled(shardID, selfNotarizedNonce, crossNotarizedNonce) + } +} + +func (btm *BlockTrackerMock) ComputeLongestChain(shardID uint32, header data.HeaderHandler) ([]data.HeaderHandler, [][]byte) { + if btm.ComputeLongestChainCalled != nil { + return btm.ComputeLongestChainCalled(shardID, header) + } + + headersInfo, ok := btm.headers[shardID] + if !ok { + return nil, nil + } + + headers := make([]data.HeaderHandler, 0) + hashes := make([][]byte, 0) + + for _, headerInfo := range headersInfo { + headers = append(headers, headerInfo.header) + hashes = append(hashes, headerInfo.hash) + } + + return headers, hashes +} + +func (btm *BlockTrackerMock) ComputeLongestMetaChainFromLastNotarized() ([]data.HeaderHandler, [][]byte, error) { + lastCrossNotarizedHeader, _, err := btm.GetLastCrossNotarizedHeader(sharding.MetachainShardId) + if err != nil { + return nil, nil, err + } + + hdrsForShard, hdrsHashesForShard := btm.ComputeLongestChain(sharding.MetachainShardId, lastCrossNotarizedHeader) + + return hdrsForShard, hdrsHashesForShard, nil +} + +func (btm *BlockTrackerMock) ComputeLongestShardsChainsFromLastNotarized() ([]data.HeaderHandler, [][]byte, map[uint32][]data.HeaderHandler, error) { + hdrsMap := make(map[uint32][]data.HeaderHandler) + hdrsHashesMap := make(map[uint32][][]byte) + + lastCrossNotarizedHeaders, err := 
btm.GetLastCrossNotarizedHeadersForAllShards() + if err != nil { + return nil, nil, nil, err + } + + maxHdrLen := 0 + for shardID := uint32(0); shardID < btm.shardCoordinator.NumberOfShards(); shardID++ { + hdrsForShard, hdrsHashesForShard := btm.ComputeLongestChain(shardID, lastCrossNotarizedHeaders[shardID]) + + hdrsMap[shardID] = append(hdrsMap[shardID], hdrsForShard...) + hdrsHashesMap[shardID] = append(hdrsHashesMap[shardID], hdrsHashesForShard...) + + tmpHdrLen := len(hdrsForShard) + if maxHdrLen < tmpHdrLen { + maxHdrLen = tmpHdrLen + } + } + + orderedHeaders := make([]data.HeaderHandler, 0) + orderedHeadersHashes := make([][]byte, 0) + + // copy from map to lists - equality between number of headers per shard + for i := 0; i < maxHdrLen; i++ { + for shardID := uint32(0); shardID < btm.shardCoordinator.NumberOfShards(); shardID++ { + hdrsForShard := hdrsMap[shardID] + hdrsHashesForShard := hdrsHashesMap[shardID] + if i >= len(hdrsForShard) { + continue + } + + orderedHeaders = append(orderedHeaders, hdrsForShard[i]) + orderedHeadersHashes = append(orderedHeadersHashes, hdrsHashesForShard[i]) + } + } + + return orderedHeaders, orderedHeadersHashes, hdrsMap, nil +} + +func (btm *BlockTrackerMock) DisplayTrackedHeaders() { + if btm.DisplayTrackedHeadersCalled != nil { + btm.DisplayTrackedHeadersCalled() + } +} + +func (btm *BlockTrackerMock) GetCrossNotarizedHeader(shardID uint32, offset uint64) (data.HeaderHandler, []byte, error) { + if btm.GetCrossNotarizedHeaderCalled != nil { + return btm.GetCrossNotarizedHeaderCalled(shardID, offset) + } + + return nil, nil, nil +} + +func (btm *BlockTrackerMock) GetLastCrossNotarizedHeader(shardID uint32) (data.HeaderHandler, []byte, error) { + if btm.GetLastCrossNotarizedHeaderCalled != nil { + return btm.GetLastCrossNotarizedHeaderCalled(shardID) + } + + btm.mutCrossNotarizedHeaders.RLock() + defer btm.mutCrossNotarizedHeaders.RUnlock() + + if btm.crossNotarizedHeaders == nil { + return nil, nil, process.ErrNotarizedHeadersSliceIsNil + } + + headerInfo := btm.lastCrossNotarizedHdrForShard(shardID) + if headerInfo == nil { + return nil, nil, process.ErrNotarizedHeadersSliceForShardIsNil + } + + return headerInfo.header, headerInfo.hash, nil +} + +func (btm *BlockTrackerMock) GetLastCrossNotarizedHeadersForAllShards() (map[uint32]data.HeaderHandler, error) { + lastCrossNotarizedHeaders := make(map[uint32]data.HeaderHandler, btm.shardCoordinator.NumberOfShards()) + + // save last committed header for verification + for shardID := uint32(0); shardID < btm.shardCoordinator.NumberOfShards(); shardID++ { + lastCrossNotarizedHeader, _, err := btm.GetLastCrossNotarizedHeader(shardID) + if err != nil { + return nil, err + } + + lastCrossNotarizedHeaders[shardID] = lastCrossNotarizedHeader + } + + return lastCrossNotarizedHeaders, nil +} + +func (btm *BlockTrackerMock) lastCrossNotarizedHdrForShard(shardID uint32) *headerInfo { + crossNotarizedHeadersCount := len(btm.crossNotarizedHeaders[shardID]) + if crossNotarizedHeadersCount > 0 { + return btm.crossNotarizedHeaders[shardID][crossNotarizedHeadersCount-1] + } + + return nil +} + +func (btm *BlockTrackerMock) GetTrackedHeaders(shardID uint32) ([]data.HeaderHandler, [][]byte) { + if btm.GetTrackedHeadersCalled != nil { + return btm.GetTrackedHeadersCalled(shardID) + } + + return nil, nil +} + +func (btm *BlockTrackerMock) GetTrackedHeadersForAllShards() map[uint32][]data.HeaderHandler { + trackedHeaders := make(map[uint32][]data.HeaderHandler) + + for shardID := uint32(0); shardID < 
btm.shardCoordinator.NumberOfShards(); shardID++ { + trackedHeadersForShard, _ := btm.GetTrackedHeaders(shardID) + trackedHeaders[shardID] = append(trackedHeaders[shardID], trackedHeadersForShard...) + } + + return trackedHeaders +} + +func (btm *BlockTrackerMock) GetTrackedHeadersWithNonce(shardID uint32, nonce uint64) ([]data.HeaderHandler, [][]byte) { + if btm.GetTrackedHeadersWithNonceCalled != nil { + return btm.GetTrackedHeadersWithNonceCalled(shardID, nonce) + } + + return nil, nil +} + +func (btm *BlockTrackerMock) IsShardStuck(shardId uint32) bool { + return btm.IsShardStuckCalled(shardId) +} + +func (btm *BlockTrackerMock) RegisterCrossNotarizedHeadersHandler(handler func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte)) { + if btm.RegisterCrossNotarizedHeadersHandlerCalled != nil { + btm.RegisterCrossNotarizedHeadersHandlerCalled(handler) + } +} + +func (btm *BlockTrackerMock) RegisterSelfNotarizedHeadersHandler(handler func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte)) { + if btm.RegisterSelfNotarizedHeadersHandlerCalled != nil { + btm.RegisterSelfNotarizedHeadersHandlerCalled(handler) + } +} + +func (btm *BlockTrackerMock) RemoveLastNotarizedHeaders() { + if btm.RemoveLastNotarizedHeadersCalled != nil { + btm.RemoveLastNotarizedHeadersCalled() + } +} + +func (btm *BlockTrackerMock) RestoreToGenesis() { + if btm.RestoreToGenesisCalled != nil { + btm.RestoreToGenesisCalled() + } +} + +func (btm *BlockTrackerMock) IsInterfaceNil() bool { + return btm == nil +} diff --git a/process/mock/cacherStub.go b/process/mock/cacherStub.go index a35caeff349..8d25d198a40 100644 --- a/process/mock/cacherStub.go +++ b/process/mock/cacherStub.go @@ -1,5 +1,7 @@ package mock +import "github.com/ElrondNetwork/elrond-go/data" + type CacherStub struct { ClearCalled func() PutCalled func(key []byte, value interface{}) (evicted bool) @@ -70,3 +72,7 @@ func (cs *CacherStub) IsInterfaceNil() bool { } return false } + +func (cs *CacherStub) GetTransactions(numRequested int, batchSizePerSender int) ([]data.TransactionHandler, [][]byte) { + panic("CacherStub.GetTransactions is not implemented") +} diff --git a/process/mock/endOfEpochTriggerStub.go b/process/mock/endOfEpochTriggerStub.go new file mode 100644 index 00000000000..5c6c4572cc1 --- /dev/null +++ b/process/mock/endOfEpochTriggerStub.go @@ -0,0 +1,83 @@ +package mock + +import "github.com/ElrondNetwork/elrond-go/data" + +type EpochStartTriggerStub struct { + ForceEpochStartCalled func(round uint64) error + IsEpochStartCalled func() bool + EpochCalled func() uint32 + ReceivedHeaderCalled func(handler data.HeaderHandler) + UpdateCalled func(round uint64) + ProcessedCalled func(header data.HeaderHandler) + EpochStartRoundCalled func() uint64 +} + +func (e *EpochStartTriggerStub) SetCurrentEpochStartRound(_ uint64) { +} + +func (e *EpochStartTriggerStub) NotifyAll(_ data.HeaderHandler) { +} + +func (e *EpochStartTriggerStub) SetFinalityAttestingRound(_ uint64) { +} + +func (e *EpochStartTriggerStub) EpochFinalityAttestingRound() uint64 { + return 0 +} + +func (e *EpochStartTriggerStub) EpochStartMetaHdrHash() []byte { + return nil +} + +func (e *EpochStartTriggerStub) Revert() { +} + +func (e *EpochStartTriggerStub) EpochStartRound() uint64 { + if e.EpochStartRoundCalled != nil { + return e.EpochStartRoundCalled() + } + return 0 +} + +func (e *EpochStartTriggerStub) Update(round uint64) { + if e.UpdateCalled != nil { + e.UpdateCalled(round) + } +} + +func (e *EpochStartTriggerStub) SetProcessed(header 
data.HeaderHandler) { + if e.ProcessedCalled != nil { + e.ProcessedCalled(header) + } +} + +func (e *EpochStartTriggerStub) ForceEpochStart(round uint64) error { + if e.ForceEpochStartCalled != nil { + return e.ForceEpochStartCalled(round) + } + return nil +} + +func (e *EpochStartTriggerStub) IsEpochStart() bool { + if e.IsEpochStartCalled != nil { + return e.IsEpochStartCalled() + } + return false +} + +func (e *EpochStartTriggerStub) Epoch() uint32 { + if e.EpochCalled != nil { + return e.EpochCalled() + } + return 0 +} + +func (e *EpochStartTriggerStub) ReceivedHeader(header data.HeaderHandler) { + if e.ReceivedHeaderCalled != nil { + e.ReceivedHeaderCalled(header) + } +} + +func (e *EpochStartTriggerStub) IsInterfaceNil() bool { + return e == nil +} diff --git a/process/mock/forkDetectorMock.go b/process/mock/forkDetectorMock.go index 512a423577f..bc2fc72b5fb 100644 --- a/process/mock/forkDetectorMock.go +++ b/process/mock/forkDetectorMock.go @@ -6,26 +6,28 @@ import ( ) type ForkDetectorMock struct { - AddHeaderCalled func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error - RemoveHeadersCalled func(nonce uint64, hash []byte) + AddHeaderCalled func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error + RemoveHeaderCalled func(nonce uint64, hash []byte) CheckForkCalled func() *process.ForkInfo GetHighestFinalBlockNonceCalled func() uint64 + GetHighestFinalBlockHashCalled func() []byte ProbableHighestNonceCalled func() uint64 - ResetProbableHighestNonceCalled func() ResetForkCalled func() GetNotarizedHeaderHashCalled func(nonce uint64) []byte + SetRollBackNonceCalled func(nonce uint64) + RestoreToGenesisCalled func() } -func (fdm *ForkDetectorMock) RestoreFinalCheckPointToGenesis() { - +func (fdm *ForkDetectorMock) RestoreToGenesis() { + fdm.RestoreToGenesisCalled() } -func (fdm *ForkDetectorMock) AddHeader(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error { - return fdm.AddHeaderCalled(header, hash, state, finalHeaders, finalHeadersHashes, isNotarizedShardStuck) +func (fdm *ForkDetectorMock) AddHeader(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { + return fdm.AddHeaderCalled(header, hash, state, selfNotarizedHeaders, selfNotarizedHeadersHashes) } -func (fdm *ForkDetectorMock) RemoveHeaders(nonce uint64, hash []byte) { - fdm.RemoveHeadersCalled(nonce, hash) +func (fdm *ForkDetectorMock) RemoveHeader(nonce uint64, hash []byte) { + fdm.RemoveHeaderCalled(nonce, hash) } func (fdm *ForkDetectorMock) CheckFork() *process.ForkInfo { @@ -36,12 +38,18 @@ func (fdm *ForkDetectorMock) GetHighestFinalBlockNonce() uint64 { return fdm.GetHighestFinalBlockNonceCalled() } +func (fdm *ForkDetectorMock) GetHighestFinalBlockHash() []byte { + return fdm.GetHighestFinalBlockHashCalled() +} + func (fdm *ForkDetectorMock) ProbableHighestNonce() uint64 { return fdm.ProbableHighestNonceCalled() } -func (fdm *ForkDetectorMock) ResetProbableHighestNonce() { - fdm.ResetProbableHighestNonceCalled() +func (fdm *ForkDetectorMock) SetRollBackNonce(nonce uint64) { + if fdm.SetRollBackNonceCalled != nil { + fdm.SetRollBackNonceCalled(nonce) + } } func (fdm 
*ForkDetectorMock) ResetFork() { @@ -54,8 +62,5 @@ func (fdm *ForkDetectorMock) GetNotarizedHeaderHash(nonce uint64) []byte { // IsInterfaceNil returns true if there is no value under the interface func (fdm *ForkDetectorMock) IsInterfaceNil() bool { - if fdm == nil { - return true - } - return false + return fdm == nil } diff --git a/process/mock/hasherMock.go b/process/mock/hasherMock.go index f896cacd0dd..17b88ebcbaa 100644 --- a/process/mock/hasherMock.go +++ b/process/mock/hasherMock.go @@ -11,7 +11,7 @@ type HasherMock struct { // Compute will output the SHA's equivalent of the input string func (sha HasherMock) Compute(s string) []byte { h := sha256.New() - h.Write([]byte(s)) + _, _ = h.Write([]byte(s)) return h.Sum(nil) } @@ -30,8 +30,5 @@ func (HasherMock) Size() int { // IsInterfaceNil returns true if there is no value under the interface func (sha HasherMock) IsInterfaceNil() bool { - if &sha == nil { - return true - } return false } diff --git a/process/mock/headerHandlerStub.go b/process/mock/headerHandlerStub.go index 0be8d80348f..d03900d15a9 100644 --- a/process/mock/headerHandlerStub.go +++ b/process/mock/headerHandlerStub.go @@ -17,14 +17,25 @@ type HeaderHandlerStub struct { CheckChainIDCalled func(reference []byte) error } +func (hhs *HeaderHandlerStub) GetReceiptsHash() []byte { + return []byte("hash") +} + func (hhs *HeaderHandlerStub) Clone() data.HeaderHandler { return hhs.CloneCalled() } +func (hhs *HeaderHandlerStub) IsStartOfEpochBlock() bool { + return false +} + func (hhs *HeaderHandlerStub) GetShardID() uint32 { return 1 } +func (hhs *HeaderHandlerStub) SetShardID(shId uint32) { +} + func (hhs *HeaderHandlerStub) GetNonce() uint64 { return 1 } diff --git a/process/mock/headerResolverMock.go b/process/mock/headerResolverMock.go index 12cda07ab34..d0e5ffd8dd7 100644 --- a/process/mock/headerResolverMock.go +++ b/process/mock/headerResolverMock.go @@ -1,6 +1,7 @@ package mock import ( + "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/p2p" ) @@ -8,6 +9,22 @@ type HeaderResolverMock struct { RequestDataFromHashCalled func(hash []byte) error ProcessReceivedMessageCalled func(message p2p.MessageP2P) error RequestDataFromNonceCalled func(nonce uint64) error + RequestDataFromEpochCalled func(identifier []byte) error + SetEpochHandlerCalled func(epochHandler dataRetriever.EpochHandler) error +} + +func (hrs *HeaderResolverMock) RequestDataFromEpoch(identifier []byte) error { + if hrs.RequestDataFromEpochCalled != nil { + return hrs.RequestDataFromEpochCalled(identifier) + } + return nil +} + +func (hrs *HeaderResolverMock) SetEpochHandler(epochHandler dataRetriever.EpochHandler) error { + if hrs.SetEpochHandlerCalled != nil { + return hrs.SetEpochHandlerCalled(epochHandler) + } + return nil } func (hrm *HeaderResolverMock) RequestDataFromHash(hash []byte) error { diff --git a/process/mock/headersCacherStub.go b/process/mock/headersCacherStub.go new file mode 100644 index 00000000000..22604317e23 --- /dev/null +++ b/process/mock/headersCacherStub.go @@ -0,0 +1,82 @@ +package mock + +import ( + "errors" + "github.com/ElrondNetwork/elrond-go/data" +) + +type HeadersCacherStub struct { + AddCalled func(headerHash []byte, header data.HeaderHandler) + RemoveHeaderByHashCalled func(headerHash []byte) + RemoveHeaderByNonceAndShardIdCalled func(hdrNonce uint64, shardId uint32) + GetHeaderByNonceAndShardIdCalled func(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) + GetHeaderByHashCalled func(hash []byte) 
(data.HeaderHandler, error) + ClearCalled func() + RegisterHandlerCalled func(handler func(header data.HeaderHandler, shardHeaderHash []byte)) + NoncesCalled func(shardId uint32) []uint64 + LenCalled func() int + MaxSizeCalled func() int +} + +func (hcs *HeadersCacherStub) AddHeader(headerHash []byte, header data.HeaderHandler) { + if hcs.AddCalled != nil { + hcs.AddCalled(headerHash, header) + } +} + +func (hcs *HeadersCacherStub) RemoveHeaderByHash(headerHash []byte) { + if hcs.RemoveHeaderByHashCalled != nil { + hcs.RemoveHeaderByHashCalled(headerHash) + } +} + +func (hcs *HeadersCacherStub) RemoveHeaderByNonceAndShardId(hdrNonce uint64, shardId uint32) { + if hcs.RemoveHeaderByNonceAndShardIdCalled != nil { + hcs.RemoveHeaderByNonceAndShardIdCalled(hdrNonce, shardId) + } +} + +func (hcs *HeadersCacherStub) GetHeadersByNonceAndShardId(hdrNonce uint64, shardId uint32) ([]data.HeaderHandler, [][]byte, error) { + if hcs.GetHeaderByNonceAndShardIdCalled != nil { + return hcs.GetHeaderByNonceAndShardIdCalled(hdrNonce, shardId) + } + return nil, nil, errors.New("err") +} + +func (hcs *HeadersCacherStub) GetHeaderByHash(hash []byte) (data.HeaderHandler, error) { + if hcs.GetHeaderByHashCalled != nil { + return hcs.GetHeaderByHashCalled(hash) + } + return nil, nil +} + +func (hcs *HeadersCacherStub) Clear() { + if hcs.ClearCalled != nil { + hcs.ClearCalled() + } +} + +func (hcs *HeadersCacherStub) RegisterHandler(handler func(header data.HeaderHandler, shardHeaderHash []byte)) { + if hcs.RegisterHandlerCalled != nil { + hcs.RegisterHandlerCalled(handler) + } +} + +func (hcs *HeadersCacherStub) Nonces(shardId uint32) []uint64 { + if hcs.NoncesCalled != nil { + return hcs.NoncesCalled(shardId) + } + return nil +} + +func (hcs *HeadersCacherStub) Len() int { + return 0 +} + +func (hcs *HeadersCacherStub) MaxSize() int { + return 100 +} + +func (hcs *HeadersCacherStub) IsInterfaceNil() bool { + return hcs == nil +} diff --git a/process/mock/interceptorProcessorStub.go b/process/mock/interceptorProcessorStub.go index 28ab048bab2..26ddeaaaf40 100644 --- a/process/mock/interceptorProcessorStub.go +++ b/process/mock/interceptorProcessorStub.go @@ -15,6 +15,9 @@ func (ips *InterceptorProcessorStub) Save(data process.InterceptedData) error { return ips.SaveCalled(data) } +func (ips *InterceptorProcessorStub) SignalEndOfProcessing(data []process.InterceptedData) { +} + func (ips *InterceptorProcessorStub) IsInterfaceNil() bool { if ips == nil { return true diff --git a/process/mock/intermediateTransactionHandlerMock.go b/process/mock/intermediateTransactionHandlerMock.go index 75ea84b3276..b65b9977302 100644 --- a/process/mock/intermediateTransactionHandlerMock.go +++ b/process/mock/intermediateTransactionHandlerMock.go @@ -63,6 +63,10 @@ func (ith *IntermediateTransactionHandlerMock) GetAllCurrentFinishedTxs() map[st return nil } +func (ith *IntermediateTransactionHandlerMock) GetCreatedInShardMiniBlock() *block.MiniBlock { + return &block.MiniBlock{} +} + // IsInterfaceNil returns true if there is no value under the interface func (ith *IntermediateTransactionHandlerMock) IsInterfaceNil() bool { if ith == nil { diff --git a/process/mock/metaPoolsHolderFake.go b/process/mock/metaPoolsHolderFake.go index 9cbcbe07a77..17b03eaa773 100644 --- a/process/mock/metaPoolsHolderFake.go +++ b/process/mock/metaPoolsHolderFake.go @@ -1,41 +1,35 @@ package mock import ( - "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" + "github.com/ElrondNetwork/elrond-go/config" 
"github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool/headersCache" "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool" "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" ) type MetaPoolsHolderFake struct { - metaBlocks storage.Cacher - miniBlocks storage.Cacher - shardHeaders storage.Cacher - headersNonces dataRetriever.Uint64SyncMapCacher - transactions dataRetriever.ShardedDataCacherNotifier - unsigned dataRetriever.ShardedDataCacherNotifier - currTxs dataRetriever.TransactionCacher + miniBlocks storage.Cacher + trieNodes storage.Cacher + shardHeaders dataRetriever.HeadersPool + transactions dataRetriever.ShardedDataCacherNotifier + unsigned dataRetriever.ShardedDataCacherNotifier + currTxs dataRetriever.TransactionCacher - MetaBlocksCalled func() storage.Cacher - ShardHeadersCalled func() storage.Cacher + ShardHeadersCalled func() dataRetriever.HeadersPool } func NewMetaPoolsHolderFake() *MetaPoolsHolderFake { mphf := &MetaPoolsHolderFake{} mphf.miniBlocks, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) - mphf.transactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache}) + mphf.transactions, _ = txpool.NewShardedTxPool(storageUnit.CacheConfig{Size: 10000, Shards: 1}) mphf.unsigned, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache}) - mphf.metaBlocks, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) - mphf.shardHeaders, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) - - cacheShardHdrNonces, _ := storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) - mphf.headersNonces, _ = dataPool.NewNonceSyncMapCacher( - cacheShardHdrNonces, - uint64ByteSlice.NewBigEndianConverter(), - ) mphf.currTxs, _ = dataPool.NewCurrentBlockPool() + mphf.shardHeaders, _ = headersCache.NewHeadersPool(config.HeadersPoolConfig{MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 100}) + mphf.trieNodes, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) return mphf } @@ -52,28 +46,21 @@ func (mphf *MetaPoolsHolderFake) UnsignedTransactions() dataRetriever.ShardedDat return mphf.unsigned } -func (mphf *MetaPoolsHolderFake) MetaBlocks() storage.Cacher { - if mphf.MetaBlocksCalled != nil { - return mphf.MetaBlocksCalled() - } - return mphf.metaBlocks -} - func (mphf *MetaPoolsHolderFake) MiniBlocks() storage.Cacher { return mphf.miniBlocks } -func (mphf *MetaPoolsHolderFake) ShardHeaders() storage.Cacher { +func (mphf *MetaPoolsHolderFake) TrieNodes() storage.Cacher { + return mphf.trieNodes +} + +func (mphf *MetaPoolsHolderFake) Headers() dataRetriever.HeadersPool { if mphf.ShardHeadersCalled != nil { return mphf.ShardHeadersCalled() } return mphf.shardHeaders } -func (mphf *MetaPoolsHolderFake) HeadersNonces() dataRetriever.Uint64SyncMapCacher { - return mphf.headersNonces -} - // IsInterfaceNil returns true if there is no value under the interface func (mphf *MetaPoolsHolderFake) IsInterfaceNil() bool { if mphf == nil { diff --git a/process/mock/metaPoolsHolderStub.go b/process/mock/metaPoolsHolderStub.go index 41d87fe259f..41de658af9e 100644 --- a/process/mock/metaPoolsHolderStub.go +++ b/process/mock/metaPoolsHolderStub.go @@ -6,10 +6,9 @@ import ( ) type MetaPoolsHolderStub struct { - MetaBlocksCalled func() storage.Cacher MiniBlocksCalled func() 
storage.Cacher - ShardHeadersCalled func() storage.Cacher - HeadersNoncesCalled func() dataRetriever.Uint64SyncMapCacher + HeadersCalled func() dataRetriever.HeadersPool + TrieNodesCalled func() storage.Cacher TransactionsCalled func() dataRetriever.ShardedDataCacherNotifier UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier CurrBlockTxsCalled func() dataRetriever.TransactionCacher @@ -27,20 +26,16 @@ func (mphs *MetaPoolsHolderStub) UnsignedTransactions() dataRetriever.ShardedDat return mphs.UnsignedTransactionsCalled() } -func (mphs *MetaPoolsHolderStub) MetaBlocks() storage.Cacher { - return mphs.MetaBlocksCalled() -} - func (mphs *MetaPoolsHolderStub) MiniBlocks() storage.Cacher { return mphs.MiniBlocksCalled() } -func (mphs *MetaPoolsHolderStub) ShardHeaders() storage.Cacher { - return mphs.ShardHeadersCalled() +func (mphs *MetaPoolsHolderStub) Headers() dataRetriever.HeadersPool { + return mphs.HeadersCalled() } -func (mphs *MetaPoolsHolderStub) HeadersNonces() dataRetriever.Uint64SyncMapCacher { - return mphs.HeadersNoncesCalled() +func (mphs *MetaPoolsHolderStub) TrieNodes() storage.Cacher { + return mphs.TrieNodesCalled() } // IsInterfaceNil returns true if there is no value under the interface diff --git a/process/mock/multiSigMock.go b/process/mock/multiSigMock.go index 7b3c2bc8633..0828b2847e5 100644 --- a/process/mock/multiSigMock.go +++ b/process/mock/multiSigMock.go @@ -31,6 +31,7 @@ type BelNevMock struct { StoreCommitmentMock func(index uint16, value []byte) error StoreCommitmentHashMock func(uint16, []byte) error CommitmentMock func(uint16) ([]byte, error) + CreateMock func(pubKeys []string, index uint16) (crypto.MultiSigner, error) } func NewMultiSigner() *BelNevMock { @@ -44,6 +45,9 @@ func NewMultiSigner() *BelNevMock { // Create resets the multiSigner and initializes corresponding fields with the given params func (bnm *BelNevMock) Create(pubKeys []string, index uint16) (crypto.MultiSigner, error) { + if bnm.CreateMock != nil { + return bnm.CreateMock(pubKeys, index) + } multiSig := NewMultiSigner() multiSig.selfId = index diff --git a/process/mock/peerAccountHandlerMock.go b/process/mock/peerAccountHandlerMock.go index 6bc9cbf7645..a135d9796cc 100644 --- a/process/mock/peerAccountHandlerMock.go +++ b/process/mock/peerAccountHandlerMock.go @@ -16,9 +16,14 @@ type PeerAccountHandlerMock struct { tracker state.AccountTracker trackableDataTrie state.DataTrieTracker - SetNonceWithJournalCalled func(nonce uint64) error `json:"-"` - SetCodeHashWithJournalCalled func(codeHash []byte) error `json:"-"` - SetCodeWithJournalCalled func(codeHash []byte) error `json:"-"` + SetNonceWithJournalCalled func(nonce uint64) error + SetCodeHashWithJournalCalled func(codeHash []byte) error + SetRootHashWithJournalCalled func([]byte) error + RatingCalled func() uint32 + SetCodeWithJournalCalled func(codeHash []byte) error + SetRatingWithJournalCalled func(rating uint32) error + TempRatingCalled func() uint32 + SetTempRatingWithJournalCalled func(rating uint32) error IncreaseLeaderSuccessRateWithJournalCalled func() error DecreaseLeaderSuccessRateWithJournalCalled func() error @@ -115,6 +120,34 @@ func (pahm *PeerAccountHandlerMock) DecreaseValidatorSuccessRateWithJournal() er return nil } +func (pahm *PeerAccountHandlerMock) GetRating() uint32 { + if pahm.SetRatingWithJournalCalled != nil { + return pahm.RatingCalled() + } + return 10 +} + +func (pahm *PeerAccountHandlerMock) SetRatingWithJournal(rating uint32) error { + if pahm.SetRatingWithJournalCalled != nil { + 
return pahm.SetRatingWithJournalCalled(rating) + } + return nil +} + +func (pahm *PeerAccountHandlerMock) GetTempRating() uint32 { + if pahm.TempRatingCalled != nil { + return pahm.TempRatingCalled() + } + return 10 +} + +func (pahm *PeerAccountHandlerMock) SetTempRatingWithJournal(rating uint32) error { + if pahm.SetTempRatingWithJournalCalled != nil { + return pahm.SetTempRatingWithJournalCalled(rating) + } + return nil +} + func (pahm *PeerAccountHandlerMock) IsInterfaceNil() bool { if pahm == nil { return true diff --git a/process/mock/pendingMiniBlocksHandlerStub.go b/process/mock/pendingMiniBlocksHandlerStub.go new file mode 100644 index 00000000000..2abc7e96155 --- /dev/null +++ b/process/mock/pendingMiniBlocksHandlerStub.go @@ -0,0 +1,37 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" +) + +type PendingMiniBlocksHandlerStub struct { + PendingMiniBlockHeadersCalled func(lastNotarizedHeaders []data.HeaderHandler) ([]block.ShardMiniBlockHeader, error) + AddProcessedHeaderCalled func(handler data.HeaderHandler) error + RevertHeaderCalled func(handler data.HeaderHandler) error +} + +func (p *PendingMiniBlocksHandlerStub) PendingMiniBlockHeaders(lastNotarizedHeaders []data.HeaderHandler) ([]block.ShardMiniBlockHeader, error) { + if p.PendingMiniBlockHeadersCalled != nil { + return p.PendingMiniBlockHeadersCalled(lastNotarizedHeaders) + } + return nil, nil +} + +func (p *PendingMiniBlocksHandlerStub) AddProcessedHeader(handler data.HeaderHandler) error { + if p.AddProcessedHeaderCalled != nil { + return p.AddProcessedHeaderCalled(handler) + } + return nil +} + +func (p *PendingMiniBlocksHandlerStub) RevertHeader(handler data.HeaderHandler) error { + if p.RevertHeaderCalled != nil { + return p.RevertHeaderCalled(handler) + } + return nil +} + +func (p *PendingMiniBlocksHandlerStub) IsInterfaceNil() bool { + return p == nil +} diff --git a/process/mock/poolsHolderMock.go b/process/mock/poolsHolderMock.go index 6fad667474a..2c54214c980 100644 --- a/process/mock/poolsHolderMock.go +++ b/process/mock/poolsHolderMock.go @@ -1,10 +1,12 @@ package mock import ( - "github.com/ElrondNetwork/elrond-go/data/typeConverters/uint64ByteSlice" + "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" + "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool/headersCache" "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" + "github.com/ElrondNetwork/elrond-go/dataRetriever/txpool" "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" ) @@ -13,35 +15,23 @@ type PoolsHolderMock struct { transactions dataRetriever.ShardedDataCacherNotifier unsignedTransactions dataRetriever.ShardedDataCacherNotifier rewardTransactions dataRetriever.ShardedDataCacherNotifier - headers storage.Cacher - metaBlocks storage.Cacher - hdrNonces dataRetriever.Uint64SyncMapCacher + headers dataRetriever.HeadersPool miniBlocks storage.Cacher peerChangesBlocks storage.Cacher - metaHdrNonces dataRetriever.Uint64SyncMapCacher + trieNodes storage.Cacher currBlockTxs dataRetriever.TransactionCacher } func NewPoolsHolderMock() *PoolsHolderMock { phf := &PoolsHolderMock{} - phf.transactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache}) + phf.transactions, _ = txpool.NewShardedTxPool(storageUnit.CacheConfig{Size: 10000, Shards: 16}) phf.unsignedTransactions, _ 
= shardedData.NewShardedData(storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache}) phf.rewardTransactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) - phf.headers, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) - phf.metaBlocks, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) - cacheHdrNonces, _ := storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) - phf.hdrNonces, _ = dataPool.NewNonceSyncMapCacher( - cacheHdrNonces, - uint64ByteSlice.NewBigEndianConverter(), - ) - cacheMetaHdrNonces, _ := storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) - phf.metaHdrNonces, _ = dataPool.NewNonceSyncMapCacher( - cacheMetaHdrNonces, - uint64ByteSlice.NewBigEndianConverter(), - ) + phf.headers, _ = headersCache.NewHeadersPool(config.HeadersPoolConfig{MaxHeadersPerShard: 1000, NumElementsToRemoveOnEviction: 100}) phf.miniBlocks, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) phf.peerChangesBlocks, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) phf.currBlockTxs, _ = dataPool.NewCurrentBlockPool() + phf.trieNodes, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) return phf } @@ -62,14 +52,10 @@ func (phm *PoolsHolderMock) RewardTransactions() dataRetriever.ShardedDataCacher return phm.rewardTransactions } -func (phm *PoolsHolderMock) Headers() storage.Cacher { +func (phm *PoolsHolderMock) Headers() dataRetriever.HeadersPool { return phm.headers } -func (phm *PoolsHolderMock) HeadersNonces() dataRetriever.Uint64SyncMapCacher { - return phm.hdrNonces -} - func (phm *PoolsHolderMock) MiniBlocks() storage.Cacher { return phm.miniBlocks } @@ -78,14 +64,6 @@ func (phm *PoolsHolderMock) PeerChangesBlocks() storage.Cacher { return phm.peerChangesBlocks } -func (phm *PoolsHolderMock) MetaBlocks() storage.Cacher { - return phm.metaBlocks -} - -func (phm *PoolsHolderMock) MetaHeadersNonces() dataRetriever.Uint64SyncMapCacher { - return phm.metaHdrNonces -} - func (phm *PoolsHolderMock) SetTransactions(transactions dataRetriever.ShardedDataCacherNotifier) { phm.transactions = transactions } @@ -94,6 +72,10 @@ func (phm *PoolsHolderMock) SetUnsignedTransactions(scrs dataRetriever.ShardedDa phm.unsignedTransactions = scrs } +func (phm *PoolsHolderMock) TrieNodes() storage.Cacher { + return phm.trieNodes +} + // IsInterfaceNil returns true if there is no value under the interface func (phf *PoolsHolderMock) IsInterfaceNil() bool { if phf == nil { diff --git a/process/mock/poolsHolderStub.go b/process/mock/poolsHolderStub.go index 35a1d5e92db..6d9a3fea977 100644 --- a/process/mock/poolsHolderStub.go +++ b/process/mock/poolsHolderStub.go @@ -6,14 +6,14 @@ import ( ) type PoolsHolderStub struct { - HeadersCalled func() storage.Cacher - HeadersNoncesCalled func() dataRetriever.Uint64SyncMapCacher + HeadersCalled func() dataRetriever.HeadersPool PeerChangesBlocksCalled func() storage.Cacher TransactionsCalled func() dataRetriever.ShardedDataCacherNotifier UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier RewardTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier MiniBlocksCalled func() storage.Cacher MetaBlocksCalled func() storage.Cacher + TrieNodesCalled func() storage.Cacher CurrBlockTxsCalled func() dataRetriever.TransactionCacher } @@ -21,14 +21,10 @@ func (phs *PoolsHolderStub) CurrentBlockTxs() dataRetriever.TransactionCacher { return phs.CurrBlockTxsCalled() } -func (phs *PoolsHolderStub) Headers() storage.Cacher { +func (phs *PoolsHolderStub) Headers() 
dataRetriever.HeadersPool { return phs.HeadersCalled() } -func (phs *PoolsHolderStub) HeadersNonces() dataRetriever.Uint64SyncMapCacher { - return phs.HeadersNoncesCalled() -} - func (phs *PoolsHolderStub) PeerChangesBlocks() storage.Cacher { return phs.PeerChangesBlocksCalled() } @@ -53,6 +49,10 @@ func (phs *PoolsHolderStub) RewardTransactions() dataRetriever.ShardedDataCacher return phs.RewardTransactionsCalled() } +func (phs *PoolsHolderStub) TrieNodes() storage.Cacher { + return phs.TrieNodesCalled() +} + // IsInterfaceNil returns true if there is no value under the interface func (phs *PoolsHolderStub) IsInterfaceNil() bool { if phs == nil { diff --git a/process/mock/preprocessorMock.go b/process/mock/preprocessorMock.go index 10c607fd714..40590ca3feb 100644 --- a/process/mock/preprocessorMock.go +++ b/process/mock/preprocessorMock.go @@ -108,18 +108,6 @@ func (ppm *PreProcessorMock) CreateAndProcessMiniBlocks( return ppm.CreateAndProcessMiniBlocksCalled(maxTxSpaceRemained, maxMbSpaceRemained, haveTime) } -func (ppm *PreProcessorMock) CreateAndProcessMiniBlock( - senderShardId, receiverShardId uint32, - spaceRemained int, - haveTime func() bool, -) (*block.MiniBlock, error) { - - if ppm.CreateAndProcessMiniBlockCalled == nil { - return nil, nil - } - return ppm.CreateAndProcessMiniBlockCalled(senderShardId, receiverShardId, spaceRemained, haveTime) -} - func (ppm *PreProcessorMock) GetAllCurrentUsedTxs() map[string]data.TransactionHandler { if ppm.GetAllCurrentUsedTxsCalled == nil { return nil diff --git a/process/mock/raterMock.go b/process/mock/raterMock.go new file mode 100644 index 00000000000..2d293d05cb1 --- /dev/null +++ b/process/mock/raterMock.go @@ -0,0 +1,94 @@ +package mock + +import "github.com/ElrondNetwork/elrond-go/sharding" + +type RaterMock struct { + StartRating uint32 + MinRating uint32 + MaxRating uint32 + IncreaseProposer uint32 + DecreaseProposer uint32 + IncreaseValidator uint32 + DecreaseValidator uint32 + + GetRatingCalled func(string) uint32 + GetStartRatingCalled func() uint32 + ComputeIncreaseProposerCalled func(val uint32) uint32 + ComputeDecreaseProposerCalled func(val uint32) uint32 + ComputeIncreaseValidatorCalled func(val uint32) uint32 + ComputeDecreaseValidatorCalled func(val uint32) uint32 + RatingReader sharding.RatingReader +} + +func GetNewMockRater() *RaterMock { + raterMock := &RaterMock{} + raterMock.GetRatingCalled = func(s string) uint32 { + return raterMock.StartRating + } + + raterMock.GetStartRatingCalled = func() uint32 { + return raterMock.StartRating + } + raterMock.ComputeIncreaseProposerCalled = func(val uint32) uint32 { + return raterMock.computeRating(val, int32(raterMock.IncreaseProposer)) + } + raterMock.ComputeDecreaseProposerCalled = func(val uint32) uint32 { + return raterMock.computeRating(val, int32(0-raterMock.DecreaseProposer)) + } + raterMock.ComputeIncreaseValidatorCalled = func(val uint32) uint32 { + return raterMock.computeRating(val, int32(raterMock.IncreaseValidator)) + } + raterMock.ComputeDecreaseValidatorCalled = func(val uint32) uint32 { + return raterMock.computeRating(val, int32(0-raterMock.DecreaseValidator)) + } + + return raterMock +} + +func (rm *RaterMock) computeRating(val uint32, ratingStep int32) uint32 { + newVal := int64(val) + int64(ratingStep) + if newVal < int64(rm.MinRating) { + return rm.MinRating + } + if newVal > int64(rm.MaxRating) { + return rm.MaxRating + } + + return uint32(newVal) +} + +func (rm *RaterMock) GetRating(pk string) uint32 { + return rm.GetRatingCalled(pk) +} + +func (rm 
*RaterMock) GetRatings([]string) map[string]uint32 { + return make(map[string]uint32) +} + +func (rm *RaterMock) GetStartRating() uint32 { + return rm.GetStartRatingCalled() +} + +func (rm *RaterMock) ComputeIncreaseProposer(val uint32) uint32 { + return rm.ComputeIncreaseProposerCalled(val) +} + +func (rm *RaterMock) ComputeDecreaseProposer(val uint32) uint32 { + return rm.ComputeDecreaseProposerCalled(val) +} + +func (rm *RaterMock) ComputeIncreaseValidator(val uint32) uint32 { + return rm.ComputeIncreaseValidatorCalled(val) +} + +func (rm *RaterMock) ComputeDecreaseValidator(val uint32) uint32 { + return rm.ComputeDecreaseValidatorCalled(val) +} + +func (rm *RaterMock) SetRatingReader(reader sharding.RatingReader) { + rm.RatingReader = reader +} + +func (rm *RaterMock) IsInterfaceNil() bool { + return rm == nil +} diff --git a/process/mock/ratingReaderMock.go b/process/mock/ratingReaderMock.go new file mode 100644 index 00000000000..bdc54bd0c82 --- /dev/null +++ b/process/mock/ratingReaderMock.go @@ -0,0 +1,27 @@ +package mock + +type RatingReaderMock struct { + GetRatingCalled func(string) uint32 + GetRatingsCalled func([]string) map[string]uint32 + RatingsMap map[string]uint32 +} + +func (rrm *RatingReaderMock) GetRating(pk string) uint32 { + if rrm.GetRatingCalled != nil { + return rrm.GetRatingCalled(pk) + } + + return 0 +} + +func (rrm *RatingReaderMock) GetRatings(pks []string) map[string]uint32 { + if rrm.GetRatingsCalled != nil { + return rrm.GetRatingsCalled(pks) + } + + return map[string]uint32{} +} + +func (rrm *RatingReaderMock) IsInterfaceNil() bool { + return rrm == nil +} diff --git a/process/mock/requestHandlerMock.go b/process/mock/requestHandlerMock.go deleted file mode 100644 index 0ebe0e160d0..00000000000 --- a/process/mock/requestHandlerMock.go +++ /dev/null @@ -1,60 +0,0 @@ -package mock - -type RequestHandlerMock struct { - RequestTransactionHandlerCalled func(destShardID uint32, txHashes [][]byte) - RequestScrHandlerCalled func(destShardID uint32, txHashes [][]byte) - RequestRewardTxHandlerCalled func(destShardID uint32, txHashes [][]byte) - RequestMiniBlockHandlerCalled func(destShardID uint32, miniblockHash []byte) - RequestHeaderHandlerCalled func(destShardID uint32, hash []byte) - RequestHeaderHandlerByNonceCalled func(destShardID uint32, nonce uint64) -} - -func (rrh *RequestHandlerMock) RequestTransaction(destShardID uint32, txHashes [][]byte) { - if rrh.RequestTransactionHandlerCalled == nil { - return - } - rrh.RequestTransactionHandlerCalled(destShardID, txHashes) -} - -func (rrh *RequestHandlerMock) RequestUnsignedTransactions(destShardID uint32, txHashes [][]byte) { - if rrh.RequestScrHandlerCalled == nil { - return - } - rrh.RequestScrHandlerCalled(destShardID, txHashes) -} - -func (rrh *RequestHandlerMock) RequestRewardTransactions(destShardID uint32, txHashes [][]byte) { - if rrh.RequestRewardTxHandlerCalled == nil { - return - } - rrh.RequestRewardTxHandlerCalled(destShardID, txHashes) -} - -func (rrh *RequestHandlerMock) RequestMiniBlock(shardId uint32, miniblockHash []byte) { - if rrh.RequestMiniBlockHandlerCalled == nil { - return - } - rrh.RequestMiniBlockHandlerCalled(shardId, miniblockHash) -} - -func (rrh *RequestHandlerMock) RequestHeader(shardId uint32, hash []byte) { - if rrh.RequestHeaderHandlerCalled == nil { - return - } - rrh.RequestHeaderHandlerCalled(shardId, hash) -} - -func (rrh *RequestHandlerMock) RequestHeaderByNonce(destShardID uint32, nonce uint64) { - if rrh.RequestHeaderHandlerByNonceCalled == nil { - return - } - 
rrh.RequestHeaderHandlerByNonceCalled(destShardID, nonce) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (rrh *RequestHandlerMock) IsInterfaceNil() bool { - if rrh == nil { - return true - } - return false -} diff --git a/process/mock/requestHandlerStub.go b/process/mock/requestHandlerStub.go new file mode 100644 index 00000000000..a5d1a88d41d --- /dev/null +++ b/process/mock/requestHandlerStub.go @@ -0,0 +1,81 @@ +package mock + +type RequestHandlerStub struct { + RequestShardHeaderCalled func(shardId uint32, hash []byte) + RequestMetaHeaderCalled func(hash []byte) + RequestMetaHeaderByNonceCalled func(nonce uint64) + RequestShardHeaderByNonceCalled func(shardId uint32, nonce uint64) + RequestTransactionHandlerCalled func(destShardID uint32, txHashes [][]byte) + RequestScrHandlerCalled func(destShardID uint32, txHashes [][]byte) + RequestRewardTxHandlerCalled func(destShardID uint32, txHashes [][]byte) + RequestMiniBlockHandlerCalled func(destShardID uint32, miniblockHash []byte) + RequestTrieNodesCalled func(shardId uint32, hash []byte) +} + +func (rhs *RequestHandlerStub) RequestShardHeader(shardId uint32, hash []byte) { + if rhs.RequestShardHeaderCalled == nil { + return + } + rhs.RequestShardHeaderCalled(shardId, hash) +} + +func (rhs *RequestHandlerStub) RequestMetaHeader(hash []byte) { + if rhs.RequestMetaHeaderCalled == nil { + return + } + rhs.RequestMetaHeaderCalled(hash) +} + +func (rhs *RequestHandlerStub) RequestMetaHeaderByNonce(nonce uint64) { + if rhs.RequestMetaHeaderByNonceCalled == nil { + return + } + rhs.RequestMetaHeaderByNonceCalled(nonce) +} + +func (rhs *RequestHandlerStub) RequestShardHeaderByNonce(shardId uint32, nonce uint64) { + if rhs.RequestShardHeaderByNonceCalled == nil { + return + } + rhs.RequestShardHeaderByNonceCalled(shardId, nonce) +} + +func (rhs *RequestHandlerStub) RequestTransaction(destShardID uint32, txHashes [][]byte) { + if rhs.RequestTransactionHandlerCalled == nil { + return + } + rhs.RequestTransactionHandlerCalled(destShardID, txHashes) +} + +func (rhs *RequestHandlerStub) RequestUnsignedTransactions(destShardID uint32, txHashes [][]byte) { + if rhs.RequestScrHandlerCalled == nil { + return + } + rhs.RequestScrHandlerCalled(destShardID, txHashes) +} + +func (rhs *RequestHandlerStub) RequestRewardTransactions(destShardID uint32, txHashes [][]byte) { + if rhs.RequestRewardTxHandlerCalled == nil { + return + } + rhs.RequestRewardTxHandlerCalled(destShardID, txHashes) +} + +func (rhs *RequestHandlerStub) RequestMiniBlock(shardId uint32, miniblockHash []byte) { + if rhs.RequestMiniBlockHandlerCalled == nil { + return + } + rhs.RequestMiniBlockHandlerCalled(shardId, miniblockHash) +} + +func (rhs *RequestHandlerStub) RequestTrieNodes(shardId uint32, miniblockHash []byte) { + if rhs.RequestTrieNodesCalled == nil { + return + } + rhs.RequestTrieNodesCalled(shardId, miniblockHash) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (rhs *RequestHandlerStub) IsInterfaceNil() bool { + return rhs == nil +} diff --git a/process/mock/shardedDataStub.go b/process/mock/shardedDataStub.go index 688a94904dd..3fa0868838e 100644 --- a/process/mock/shardedDataStub.go +++ b/process/mock/shardedDataStub.go @@ -47,10 +47,6 @@ func (sd *ShardedDataStub) MergeShardStores(sourceCacheId, destCacheId string) { sd.MergeShardStoresCalled(sourceCacheId, destCacheId) } -func (sd *ShardedDataStub) MoveData(sourceCacheId, destCacheId string, key [][]byte) { - sd.MoveDataCalled(sourceCacheId, destCacheId, 
key) -} - func (sd *ShardedDataStub) Clear() { sd.ClearCalled() } diff --git a/process/mock/transactionCoordinatorMock.go b/process/mock/transactionCoordinatorMock.go index 3769e30b714..c3c078a1053 100644 --- a/process/mock/transactionCoordinatorMock.go +++ b/process/mock/transactionCoordinatorMock.go @@ -22,7 +22,11 @@ type TransactionCoordinatorMock struct { CreateMbsAndProcessTransactionsFromMeCalled func(maxTxRemaining uint32, maxMbRemaining uint32, haveTime func() bool) block.MiniBlockSlice CreateMarshalizedDataCalled func(body block.Body) (map[uint32]block.MiniBlockSlice, map[string][][]byte) GetAllCurrentUsedTxsCalled func(blockType block.Type) map[string]data.TransactionHandler - VerifyCreatedBlockTransactionsCalled func(body block.Body) error + VerifyCreatedBlockTransactionsCalled func(hdr data.HeaderHandler, body block.Body) error +} + +func (tcm *TransactionCoordinatorMock) CreateReceiptsHash() ([]byte, error) { + return []byte("receiptHash"), nil } func (tcm *TransactionCoordinatorMock) ComputeTransactionType(tx data.TransactionHandler) (process.TransactionType, error) { @@ -129,18 +133,15 @@ func (tcm *TransactionCoordinatorMock) GetAllCurrentUsedTxs(blockType block.Type return tcm.GetAllCurrentUsedTxsCalled(blockType) } -func (tcm *TransactionCoordinatorMock) VerifyCreatedBlockTransactions(body block.Body) error { +func (tcm *TransactionCoordinatorMock) VerifyCreatedBlockTransactions(hdr data.HeaderHandler, body block.Body) error { if tcm.VerifyCreatedBlockTransactionsCalled == nil { return nil } - return tcm.VerifyCreatedBlockTransactionsCalled(body) + return tcm.VerifyCreatedBlockTransactionsCalled(hdr, body) } // IsInterfaceNil returns true if there is no value under the interface func (tcm *TransactionCoordinatorMock) IsInterfaceNil() bool { - if tcm == nil { - return true - } - return false + return tcm == nil } diff --git a/process/mock/trieStub.go b/process/mock/trieStub.go new file mode 100644 index 00000000000..1e5699a80fa --- /dev/null +++ b/process/mock/trieStub.go @@ -0,0 +1,154 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" +) + +type TrieStub struct { + GetCalled func(key []byte) ([]byte, error) + UpdateCalled func(key, value []byte) error + DeleteCalled func(key []byte) error + RootCalled func() ([]byte, error) + ProveCalled func(key []byte) ([][]byte, error) + VerifyProofCalled func(proofs [][]byte, key []byte) (bool, error) + CommitCalled func() error + RecreateCalled func(root []byte) (data.Trie, error) + DeepCloneCalled func() (data.Trie, error) + CancelPruneCalled func(rootHash []byte, identifier data.TriePruningIdentifier) + PruneCalled func(rootHash []byte, identifier data.TriePruningIdentifier) error + ResetOldHashesCalled func() [][]byte + AppendToOldHashesCalled func([][]byte) + SnapshotCalled func() error + GetSerializedNodesCalled func([]byte, uint64) ([][]byte, error) + DatabaseCalled func() data.DBWriteCacher +} + +func (ts *TrieStub) Get(key []byte) ([]byte, error) { + if ts.GetCalled != nil { + return ts.GetCalled(key) + } + + return nil, errNotImplemented +} + +func (ts *TrieStub) Update(key, value []byte) error { + if ts.UpdateCalled != nil { + return ts.UpdateCalled(key, value) + } + + return errNotImplemented +} + +func (ts *TrieStub) Delete(key []byte) error { + if ts.DeleteCalled != nil { + return ts.DeleteCalled(key) + } + + return errNotImplemented +} + +func (ts *TrieStub) Root() ([]byte, error) { + if ts.RootCalled != nil { + return ts.RootCalled() + } + + return nil, errNotImplemented +} + +func (ts 
*TrieStub) Prove(key []byte) ([][]byte, error) { + if ts.ProveCalled != nil { + return ts.ProveCalled(key) + } + + return nil, errNotImplemented +} + +func (ts *TrieStub) VerifyProof(proofs [][]byte, key []byte) (bool, error) { + if ts.VerifyProofCalled != nil { + return ts.VerifyProofCalled(proofs, key) + } + + return false, errNotImplemented +} + +func (ts *TrieStub) Commit() error { + if ts != nil { + return ts.CommitCalled() + } + + return errNotImplemented +} + +func (ts *TrieStub) Recreate(root []byte) (data.Trie, error) { + if ts.RecreateCalled != nil { + return ts.RecreateCalled(root) + } + + return nil, errNotImplemented +} + +func (ts *TrieStub) String() string { + return "stub trie" +} + +func (ts *TrieStub) DeepClone() (data.Trie, error) { + return ts.DeepCloneCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ts *TrieStub) IsInterfaceNil() bool { + return ts == nil +} + +// CancelPrune invalidates the hashes that correspond to the given root hash from the eviction waiting list +func (ts *TrieStub) CancelPrune(rootHash []byte, identifier data.TriePruningIdentifier) { + if ts.CancelPruneCalled != nil { + ts.CancelPruneCalled(rootHash, identifier) + } +} + +// Prune removes from the database all the old hashes that correspond to the given root hash +func (ts *TrieStub) Prune(rootHash []byte, identifier data.TriePruningIdentifier) error { + if ts.PruneCalled != nil { + return ts.PruneCalled(rootHash, identifier) + } + + return errNotImplemented +} + +// ResetOldHashes resets the oldHashes and oldRoot variables and returns the old hashes +func (ts *TrieStub) ResetOldHashes() [][]byte { + if ts.ResetOldHashesCalled != nil { + return ts.ResetOldHashesCalled() + } + + return nil +} + +// AppendToOldHashes appends the given hashes to the trie's oldHashes variable +func (ts *TrieStub) AppendToOldHashes(hashes [][]byte) { + if ts.AppendToOldHashesCalled != nil { + ts.AppendToOldHashesCalled(hashes) + } +} + +func (ts *TrieStub) Snapshot() error { + if ts.SnapshotCalled != nil { + return ts.SnapshotCalled() + } + return nil +} + +func (ts *TrieStub) GetSerializedNodes(hash []byte, maxBuffToSend uint64) ([][]byte, error) { + if ts.GetSerializedNodesCalled != nil { + return ts.GetSerializedNodesCalled(hash, maxBuffToSend) + } + return nil, nil +} + +func (ts *TrieStub) Database() data.DBWriteCacher { + if ts.DatabaseCalled != nil { + return ts.DatabaseCalled() + } + return nil +} diff --git a/process/peer/export_test.go b/process/peer/export_test.go index 373869db1aa..ebb9a13a860 100644 --- a/process/peer/export_test.go +++ b/process/peer/export_test.go @@ -7,37 +7,37 @@ import ( "github.com/ElrondNetwork/elrond-go/sharding" ) -func (p *validatorStatistics) CheckForMissedBlocks( +func (vs *validatorStatistics) CheckForMissedBlocks( currentHeaderRound uint64, previousHeaderRound uint64, prevRandSeed []byte, shardId uint32, ) error { - return p.checkForMissedBlocks(currentHeaderRound, previousHeaderRound, prevRandSeed ,shardId) + return vs.checkForMissedBlocks(currentHeaderRound, previousHeaderRound, prevRandSeed, shardId) } -func (p *validatorStatistics) SaveInitialState(in []*sharding.InitialNode, stakeValue *big.Int) error { - return p.saveInitialState(in, stakeValue) +func (vs *validatorStatistics) SaveInitialState(in []*sharding.InitialNode, stakeValue *big.Int, initialRating uint32) error { + return vs.saveInitialState(in, stakeValue, initialRating) } -func (p *validatorStatistics) GetMatchingPrevShardData(currentShardData block.ShardData, 
shardInfo []block.ShardData) *block.ShardData { - return p.getMatchingPrevShardData(currentShardData, shardInfo) +func (vs *validatorStatistics) GetMatchingPrevShardData(currentShardData block.ShardData, shardInfo []block.ShardData) *block.ShardData { + return vs.getMatchingPrevShardData(currentShardData, shardInfo) } -func (p *validatorStatistics) LoadPreviousShardHeaders(currentHeader, previousHeader *block.MetaBlock) error { - return p.loadPreviousShardHeaders(currentHeader, previousHeader) +func (vs *validatorStatistics) LoadPreviousShardHeaders(currentHeader, previousHeader *block.MetaBlock) error { + return vs.loadPreviousShardHeaders(currentHeader, previousHeader) } -func (p *validatorStatistics) LoadPreviousShardHeadersMeta(currentHeader, previousHeader *block.MetaBlock) error { - return p.loadPreviousShardHeadersMeta(currentHeader) +func (vs *validatorStatistics) LoadPreviousShardHeadersMeta(currentHeader, _ *block.MetaBlock) error { + return vs.loadPreviousShardHeadersMeta(currentHeader) } -func (p *validatorStatistics) PrevShardInfo() map[string]block.ShardData { - p.mutPrevShardInfo.RLock() - defer p.mutPrevShardInfo.RUnlock() - return p.prevShardInfo +func (vs *validatorStatistics) PrevShardInfo() map[string]block.ShardData { + vs.mutPrevShardInfo.RLock() + defer vs.mutPrevShardInfo.RUnlock() + return vs.prevShardInfo } -func (p *validatorStatistics) BuildShardDataKey(sh block.ShardData) string { - return p.buildShardDataKey(sh) +func (vs *validatorStatistics) BuildShardDataKey(sh block.ShardData) string { + return vs.buildShardDataKey(sh) } diff --git a/process/peer/interface.go b/process/peer/interface.go index 24e5c814753..fe46efa410b 100644 --- a/process/peer/interface.go +++ b/process/peer/interface.go @@ -2,12 +2,12 @@ package peer import ( "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/dataRetriever" ) // DataPool indicates the main functionality needed in order to fetch the required blocks from the pool type DataPool interface { - MetaBlocks() storage.Cacher + Headers() dataRetriever.HeadersPool IsInterfaceNil() bool } diff --git a/process/peer/mediators.go b/process/peer/mediators.go index a3d19aa6821..d09cc07e6f2 100644 --- a/process/peer/mediators.go +++ b/process/peer/mediators.go @@ -21,7 +21,7 @@ func (lh *shardMediator) loadPreviousShardHeaders(header, previousHeader *block. 
return lh.vs.loadPreviousShardHeaders(header, previousHeader) } -func (lh *metaMediator) loadPreviousShardHeaders(header, previousHeader *block.MetaBlock) error { +func (lh *metaMediator) loadPreviousShardHeaders(header, _ *block.MetaBlock) error { if lh.vs == nil { return process.ErrNilMediator } diff --git a/process/peer/process.go b/process/peer/process.go index 933e5b74ef7..8575ab7878a 100644 --- a/process/peer/process.go +++ b/process/peer/process.go @@ -6,20 +6,24 @@ import ( "math/big" "sync" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/ElrondNetwork/elrond-go/sharding" ) +var log = logger.GetOrCreate("process/peer") + // ArgValidatorStatisticsProcessor holds all dependencies for the validatorStatistics type ArgValidatorStatisticsProcessor struct { InitialNodes []*sharding.InitialNode - Economics *economics.EconomicsData + StakeValue *big.Int Marshalizer marshal.Marshalizer NodesCoordinator sharding.NodesCoordinator ShardCoordinator sharding.Coordinator @@ -27,6 +31,7 @@ type ArgValidatorStatisticsProcessor struct { StorageService dataRetriever.StorageService AdrConv state.AddressConverter PeerAdapter state.AccountsAdapter + Rater sharding.RaterHandler } type validatorStatistics struct { @@ -40,6 +45,8 @@ type validatorStatistics struct { prevShardInfo map[string]block.ShardData mutPrevShardInfo sync.RWMutex mediator shardMetaMediator + rater sharding.RaterHandler + initialNodes []*sharding.InitialNode } // NewValidatorStatisticsProcessor instantiates a new validatorStatistics structure responsible of keeping account of @@ -66,11 +73,11 @@ func NewValidatorStatisticsProcessor(arguments ArgValidatorStatisticsProcessor) if arguments.Marshalizer == nil || arguments.Marshalizer.IsInterfaceNil() { return nil, process.ErrNilMarshalizer } - if arguments.Economics == nil { + if arguments.StakeValue == nil { return nil, process.ErrNilEconomicsData } - if arguments.Economics.StakeValue() == nil { - return nil, process.ErrNilEconomicsData + if check.IfNil(arguments.Rater) { + return nil, process.ErrNilRater } vs := &validatorStatistics{ @@ -82,10 +89,27 @@ func NewValidatorStatisticsProcessor(arguments ArgValidatorStatisticsProcessor) storageService: arguments.StorageService, marshalizer: arguments.Marshalizer, prevShardInfo: make(map[string]block.ShardData), + rater: arguments.Rater, } vs.mediator = vs.createMediator() - err := vs.saveInitialState(arguments.InitialNodes, arguments.Economics.StakeValue()) + rater := arguments.Rater + ratingReaderSetter, ok := rater.(sharding.RatingReaderSetter) + + if !ok { + return nil, process.ErrNilRatingReader + } + log.Debug("setting ratingReader") + + rr := &RatingReader{ + getRating: vs.getRating, + } + + ratingReaderSetter.SetRatingReader(rr) + + vs.initialNodes = arguments.InitialNodes + + err := vs.saveInitialState(vs.initialNodes, arguments.StakeValue, rater.GetStartRating()) if err != nil { return nil, err } @@ -94,25 +118,31 @@ func NewValidatorStatisticsProcessor(arguments ArgValidatorStatisticsProcessor) } // saveInitialState takes an initial peer list, validates it and sets up the initial state for each of the 
peers -func (p *validatorStatistics) saveInitialState(in []*sharding.InitialNode, stakeValue *big.Int) error { +func (vs *validatorStatistics) saveInitialState( + in []*sharding.InitialNode, + stakeValue *big.Int, + startRating uint32, +) error { for _, node := range in { - err := p.initializeNode(node, stakeValue) + err := vs.initializeNode(node, stakeValue, startRating) if err != nil { return err } } - _, err := p.peerAdapter.Commit() + hash, err := vs.peerAdapter.Commit() if err != nil { return err } + log.Trace("committed peer adapter", "root hash", core.ToHex(hash)) + return nil } // IsNodeValid calculates if a node that's present in the initial validator list // contains all the required information in order to be able to participate in consensus -func (p *validatorStatistics) IsNodeValid(node *sharding.InitialNode) bool { +func (vs *validatorStatistics) IsNodeValid(node *sharding.InitialNode) bool { if len(node.PubKey) == 0 { return false } @@ -123,8 +153,8 @@ func (p *validatorStatistics) IsNodeValid(node *sharding.InitialNode) bool { return true } -func (p *validatorStatistics) processPeerChanges(header data.HeaderHandler) error { - if p.shardCoordinator.SelfId() == sharding.MetachainShardId { +func (vs *validatorStatistics) processPeerChanges(header data.HeaderHandler) error { + if vs.shardCoordinator.SelfId() == sharding.MetachainShardId { return nil } @@ -134,7 +164,7 @@ func (p *validatorStatistics) processPeerChanges(header data.HeaderHandler) erro } for _, peerChange := range metaBlock.PeerInfo { - err := p.updatePeerState(peerChange) + err := vs.updatePeerState(peerChange) if err != nil { return err } @@ -143,19 +173,19 @@ func (p *validatorStatistics) processPeerChanges(header data.HeaderHandler) erro return nil } -func (p *validatorStatistics) updatePeerState( +func (vs *validatorStatistics) updatePeerState( peerChange block.PeerData, ) error { - adrSrc, err := p.adrConv.CreateAddressFromPublicKeyBytes(peerChange.Address) + adrSrc, err := vs.adrConv.CreateAddressFromPublicKeyBytes(peerChange.PublicKey) if err != nil { return err } if peerChange.Action == block.PeerDeregistration { - return p.peerAdapter.RemoveAccount(adrSrc) + return vs.peerAdapter.RemoveAccount(adrSrc) } - accHandler, err := p.peerAdapter.GetAccountWithJournal(adrSrc) + accHandler, err := vs.peerAdapter.GetAccountWithJournal(adrSrc) if err != nil { return err } @@ -165,6 +195,13 @@ func (p *validatorStatistics) updatePeerState( return process.ErrWrongTypeAssertion } + if !bytes.Equal(peerChange.Address, account.RewardAddress) { + err := account.SetRewardAddressWithJournal(peerChange.Address) + if err != nil { + return err + } + } + if !bytes.Equal(peerChange.PublicKey, account.BLSPublicKey) { err := account.SetBLSPublicKeyWithJournal(peerChange.PublicKey) if err != nil { @@ -181,7 +218,7 @@ func (p *validatorStatistics) updatePeerState( } } - if peerChange.Action == block.PeerRegistrantion && peerChange.TimeStamp != account.Nonce { + if peerChange.Action == block.PeerRegistration && peerChange.TimeStamp != account.Nonce { err := account.SetNonceWithJournal(peerChange.TimeStamp) if err != nil { return err @@ -205,22 +242,22 @@ func (p *validatorStatistics) updatePeerState( // UpdatePeerState takes a header, updates the peer state for all of the // consensus members and returns the new root hash -func (p *validatorStatistics) UpdatePeerState(header data.HeaderHandler) ([]byte, error) { +func (vs *validatorStatistics) UpdatePeerState(header data.HeaderHandler) ([]byte, error) { if header.GetNonce() == 0 { - 
return p.peerAdapter.RootHash() + return vs.peerAdapter.RootHash() } - err := p.processPeerChanges(header) + err := vs.processPeerChanges(header) if err != nil { return nil, err } - consensusGroup, err := p.nodesCoordinator.ComputeValidatorsGroup(header.GetPrevRandSeed(), header.GetRound(), header.GetShardID()) + consensusGroup, err := vs.nodesCoordinator.ComputeValidatorsGroup(header.GetPrevRandSeed(), header.GetRound(), header.GetShardID()) if err != nil { return nil, err } - err = p.updateValidatorInfo(consensusGroup, header.GetShardID()) + err = vs.updateValidatorInfo(consensusGroup) if err != nil { return nil, err } @@ -228,15 +265,15 @@ func (p *validatorStatistics) UpdatePeerState(header data.HeaderHandler) ([]byte // TODO: This should be removed when we have the genesis block in the storage also // and make sure to calculate gaps for the first block also if header.GetNonce() == 1 { - return p.peerAdapter.RootHash() + return vs.peerAdapter.RootHash() } - previousHeader, err := process.GetMetaHeader(header.GetPrevHash(), p.dataPool.MetaBlocks(), p.marshalizer, p.storageService) + previousHeader, err := process.GetMetaHeader(header.GetPrevHash(), vs.dataPool.Headers(), vs.marshalizer, vs.storageService) if err != nil { return nil, err } - err = p.checkForMissedBlocks( + err = vs.checkForMissedBlocks( header.GetRound(), previousHeader.GetRound(), previousHeader.GetPrevRandSeed(), @@ -246,25 +283,33 @@ func (p *validatorStatistics) UpdatePeerState(header data.HeaderHandler) ([]byte return nil, err } - err = p.updateShardDataPeerState(header, previousHeader) + err = vs.updateShardDataPeerState(header, previousHeader) if err != nil { return nil, err } - return p.peerAdapter.RootHash() + vs.displayRatings() + + return vs.peerAdapter.RootHash() +} + +func (vs *validatorStatistics) displayRatings() { + for _, node := range vs.initialNodes { + log.Trace("ratings", "pk", node.Address, "tempRating", vs.getTempRating(node.PubKey)) + } } // Commit commits the validator statistics trie and returns the root hash -func (p *validatorStatistics) Commit() ([]byte, error) { - return p.peerAdapter.Commit() +func (vs *validatorStatistics) Commit() ([]byte, error) { + return vs.peerAdapter.Commit() } // RootHash returns the root hash of the validator statistics trie -func (p *validatorStatistics) RootHash() ([]byte, error) { - return p.peerAdapter.RootHash() +func (vs *validatorStatistics) RootHash() ([]byte, error) { + return vs.peerAdapter.RootHash() } -func (p *validatorStatistics) checkForMissedBlocks( +func (vs *validatorStatistics) checkForMissedBlocks( currentHeaderRound, previousHeaderRound uint64, prevRandSeed []byte, @@ -274,21 +319,49 @@ func (p *validatorStatistics) checkForMissedBlocks( return nil } + sw := core.NewStopWatch() + sw.Start("checkForMissedBlocks") + defer func() { + sw.Stop("checkForMissedBlocks") + log.Debug("measurements checkForMissedBlocks", sw.GetMeasurements()...) 
+ }() + for i := previousHeaderRound + 1; i < currentHeaderRound; i++ { - consensusGroup, err := p.nodesCoordinator.ComputeValidatorsGroup(prevRandSeed, i, shardId) + swInner := core.NewStopWatch() + + swInner.Start("ComputeValidatorsGroup") + consensusGroup, err := vs.nodesCoordinator.ComputeValidatorsGroup(prevRandSeed, i, shardId) + swInner.Stop("ComputeValidatorsGroup") if err != nil { return err } - leaderPeerAcc, err := p.GetPeerAccount(consensusGroup[0].PubKey()) + swInner.Start("GetPeerAccount") + leaderPeerAcc, err := vs.GetPeerAccount(consensusGroup[0].PubKey()) + swInner.Stop("GetPeerAccount") if err != nil { return err } + swInner.Start("DecreaseLeaderSuccessRateWithJournal") err = leaderPeerAcc.DecreaseLeaderSuccessRateWithJournal() + swInner.Stop("DecreaseLeaderSuccessRateWithJournal") + if err != nil { + return err + } + + swInner.Start("ComputeDecreaseProposer") + newRating := vs.rater.ComputeDecreaseProposer(leaderPeerAcc.GetTempRating()) + swInner.Stop("ComputeDecreaseProposer") + + swInner.Start("SetTempRatingWithJournal") + err = leaderPeerAcc.SetTempRatingWithJournal(newRating) + swInner.Stop("SetTempRatingWithJournal") if err != nil { return err } + + sw.Add(swInner) } return nil @@ -296,16 +369,16 @@ func (p *validatorStatistics) checkForMissedBlocks( // RevertPeerState takes the current and previous headers and undos the peer state // for all of the consensus members -func (p *validatorStatistics) RevertPeerState(header data.HeaderHandler) error { - return p.peerAdapter.RecreateTrie(header.GetValidatorStatsRootHash()) +func (vs *validatorStatistics) RevertPeerState(header data.HeaderHandler) error { + return vs.peerAdapter.RecreateTrie(header.GetValidatorStatsRootHash()) } // RevertPeerStateToSnapshot reverts the applied changes to the peerAdapter -func (p *validatorStatistics) RevertPeerStateToSnapshot(snapshot int) error { - return p.peerAdapter.RevertToSnapshot(snapshot) +func (vs *validatorStatistics) RevertPeerStateToSnapshot(snapshot int) error { + return vs.peerAdapter.RevertToSnapshot(snapshot) } -func (p *validatorStatistics) updateShardDataPeerState(header, previousHeader data.HeaderHandler) error { +func (vs *validatorStatistics) updateShardDataPeerState(header, previousHeader data.HeaderHandler) error { metaHeader, ok := header.(*block.MetaBlock) if !ok { return process.ErrInvalidMetaHeader @@ -315,19 +388,19 @@ func (p *validatorStatistics) updateShardDataPeerState(header, previousHeader da return process.ErrInvalidMetaHeader } - err := p.mediator.loadPreviousShardHeaders(metaHeader, prevMetaHeader) + err := vs.mediator.loadPreviousShardHeaders(metaHeader, prevMetaHeader) if err != nil { return err } for _, h := range metaHeader.ShardInfo { - shardConsensus, shardInfoErr := p.nodesCoordinator.ComputeValidatorsGroup(h.PrevRandSeed, h.Round, h.ShardID) + shardConsensus, shardInfoErr := vs.nodesCoordinator.ComputeValidatorsGroup(h.PrevRandSeed, h.Round, h.ShardID) if shardInfoErr != nil { return shardInfoErr } - shardInfoErr = p.updateValidatorInfo(shardConsensus, h.ShardID) + shardInfoErr = vs.updateValidatorInfo(shardConsensus) if shardInfoErr != nil { return shardInfoErr } @@ -336,15 +409,15 @@ func (p *validatorStatistics) updateShardDataPeerState(header, previousHeader da continue } - sdKey := p.buildShardDataKey(h) - p.mutPrevShardInfo.RLock() - prevShardData, prevDataOk := p.prevShardInfo[sdKey] - p.mutPrevShardInfo.RUnlock() + sdKey := vs.buildShardDataKey(h) + vs.mutPrevShardInfo.RLock() + prevShardData, prevDataOk := vs.prevShardInfo[sdKey] + 
vs.mutPrevShardInfo.RUnlock() if !prevDataOk { return process.ErrMissingPrevShardData } - shardInfoErr = p.checkForMissedBlocks( + shardInfoErr = vs.checkForMissedBlocks( h.Round, prevShardData.Round, prevShardData.PrevRandSeed, @@ -358,17 +431,18 @@ func (p *validatorStatistics) updateShardDataPeerState(header, previousHeader da return nil } -func (p *validatorStatistics) initializeNode(node *sharding.InitialNode, stakeValue *big.Int) error { - if !p.IsNodeValid(node) { +func (vs *validatorStatistics) initializeNode(node *sharding.InitialNode, stakeValue *big.Int, + startRating uint32) error { + if !vs.IsNodeValid(node) { return process.ErrInvalidInitialNodesState } - peerAccount, err := p.generatePeerAccount(node) + peerAccount, err := vs.generatePeerAccount(node) if err != nil { return err } - err = p.savePeerAccountData(peerAccount, node, stakeValue) + err = vs.savePeerAccountData(peerAccount, node, stakeValue, startRating) if err != nil { return err } @@ -376,13 +450,13 @@ func (p *validatorStatistics) initializeNode(node *sharding.InitialNode, stakeVa return nil } -func (p *validatorStatistics) generatePeerAccount(node *sharding.InitialNode) (*state.PeerAccount, error) { - address, err := p.adrConv.CreateAddressFromHex(node.PubKey) +func (vs *validatorStatistics) generatePeerAccount(node *sharding.InitialNode) (*state.PeerAccount, error) { + address, err := vs.adrConv.CreateAddressFromHex(node.PubKey) if err != nil { return nil, err } - acc, err := p.peerAdapter.GetAccountWithJournal(address) + acc, err := vs.peerAdapter.GetAccountWithJournal(address) if err != nil { return nil, err } @@ -395,12 +469,13 @@ func (p *validatorStatistics) generatePeerAccount(node *sharding.InitialNode) (* return peerAccount, nil } -func (p *validatorStatistics) savePeerAccountData( +func (vs *validatorStatistics) savePeerAccountData( peerAccount *state.PeerAccount, data *sharding.InitialNode, stakeValue *big.Int, + startRating uint32, ) error { - err := peerAccount.SetAddressWithJournal([]byte(data.Address)) + err := peerAccount.SetRewardAddressWithJournal([]byte(data.Address)) if err != nil { return err } @@ -420,24 +495,38 @@ func (p *validatorStatistics) savePeerAccountData( return err } + err = peerAccount.SetRatingWithJournal(startRating) + if err != nil { + return err + } + + err = peerAccount.SetTempRatingWithJournal(startRating) + if err != nil { + return err + } + return nil } -func (p *validatorStatistics) updateValidatorInfo(validatorList []sharding.Validator, shardId uint32) error { +func (vs *validatorStatistics) updateValidatorInfo(validatorList []sharding.Validator) error { lenValidators := len(validatorList) for i := 0; i < lenValidators; i++ { - peerAcc, err := p.GetPeerAccount(validatorList[i].PubKey()) + peerAcc, err := vs.GetPeerAccount(validatorList[i].PubKey()) if err != nil { return err } + var newRating uint32 isLeader := i == 0 if isLeader { err = peerAcc.IncreaseLeaderSuccessRateWithJournal() + newRating = vs.rater.ComputeIncreaseProposer(peerAcc.GetTempRating()) } else { err = peerAcc.IncreaseValidatorSuccessRateWithJournal() + newRating = vs.rater.ComputeIncreaseValidator(peerAcc.GetTempRating()) } + err = peerAcc.SetTempRatingWithJournal(newRating) if err != nil { return err } @@ -447,13 +536,13 @@ func (p *validatorStatistics) updateValidatorInfo(validatorList []sharding.Valid } // GetPeerAccount will return a PeerAccountHandler for a given address -func (p *validatorStatistics) GetPeerAccount(address []byte) (state.PeerAccountHandler, error) { - addressContainer, err := 
p.adrConv.CreateAddressFromPublicKeyBytes(address) +func (vs *validatorStatistics) GetPeerAccount(address []byte) (state.PeerAccountHandler, error) { + addressContainer, err := vs.adrConv.CreateAddressFromPublicKeyBytes(address) if err != nil { return nil, err } - account, err := p.peerAdapter.GetAccountWithJournal(addressContainer) + account, err := vs.peerAdapter.GetAccountWithJournal(addressContainer) if err != nil { return nil, err } @@ -469,10 +558,10 @@ func (p *validatorStatistics) GetPeerAccount(address []byte) (state.PeerAccountH // loadPreviousShardHeaders loads the previous shard headers for a given metablock. For the metachain it's easy // since it has all the shard headers in its storage, but for the shard it's a bit trickier and we need // to iterate through past metachain headers until we find all the ShardData we are interested in -func (p *validatorStatistics) loadPreviousShardHeaders(currentHeader, previousHeader *block.MetaBlock) error { +func (vs *validatorStatistics) loadPreviousShardHeaders(currentHeader, previousHeader *block.MetaBlock) error { - missingPreviousShardData := p.loadExistingPrevShardData(currentHeader, previousHeader) - missingPreviousShardData, err := p.loadMissingPrevShardDataFromStorage(missingPreviousShardData, previousHeader) + missingPreviousShardData := vs.loadExistingPrevShardData(currentHeader, previousHeader) + missingPreviousShardData, err := vs.loadMissingPrevShardDataFromStorage(missingPreviousShardData, previousHeader) if err != nil { return err } @@ -484,11 +573,11 @@ func (p *validatorStatistics) loadPreviousShardHeaders(currentHeader, previousHe return nil } -func (p *validatorStatistics) loadExistingPrevShardData(currentHeader, previousHeader *block.MetaBlock) map[string]block.ShardData { - p.mutPrevShardInfo.Lock() - defer p.mutPrevShardInfo.Unlock() +func (vs *validatorStatistics) loadExistingPrevShardData(currentHeader, previousHeader *block.MetaBlock) map[string]block.ShardData { + vs.mutPrevShardInfo.Lock() + defer vs.mutPrevShardInfo.Unlock() - p.prevShardInfo = make(map[string]block.ShardData, len(currentHeader.ShardInfo)) + vs.prevShardInfo = make(map[string]block.ShardData, len(currentHeader.ShardInfo)) missingPreviousShardData := make(map[string]block.ShardData, len(currentHeader.ShardInfo)) for _, currentShardData := range currentHeader.ShardInfo { @@ -496,16 +585,16 @@ func (p *validatorStatistics) loadExistingPrevShardData(currentHeader, previousH continue } - sdKey := p.buildShardDataKey(currentShardData) - prevShardData := p.getMatchingPrevShardData(currentShardData, currentHeader.ShardInfo) + sdKey := vs.buildShardDataKey(currentShardData) + prevShardData := vs.getMatchingPrevShardData(currentShardData, currentHeader.ShardInfo) if prevShardData != nil { - p.prevShardInfo[sdKey] = *prevShardData + vs.prevShardInfo[sdKey] = *prevShardData continue } - prevShardData = p.getMatchingPrevShardData(currentShardData, previousHeader.ShardInfo) + prevShardData = vs.getMatchingPrevShardData(currentShardData, previousHeader.ShardInfo) if prevShardData != nil { - p.prevShardInfo[sdKey] = *prevShardData + vs.prevShardInfo[sdKey] = *prevShardData continue } @@ -515,9 +604,9 @@ func (p *validatorStatistics) loadExistingPrevShardData(currentHeader, previousH return missingPreviousShardData } -func (p *validatorStatistics) loadMissingPrevShardDataFromStorage(missingPreviousShardData map[string]block.ShardData, previousHeader *block.MetaBlock) (map[string]block.ShardData, error) { - p.mutPrevShardInfo.Lock() - defer 
p.mutPrevShardInfo.Unlock() +func (vs *validatorStatistics) loadMissingPrevShardDataFromStorage(missingPreviousShardData map[string]block.ShardData, previousHeader *block.MetaBlock) (map[string]block.ShardData, error) { + vs.mutPrevShardInfo.Lock() + defer vs.mutPrevShardInfo.Unlock() searchHeader := &block.MetaBlock{} *searchHeader = *previousHeader @@ -526,17 +615,17 @@ func (p *validatorStatistics) loadMissingPrevShardDataFromStorage(missingPreviou break } - recursiveHeader, err := process.GetMetaHeader(searchHeader.GetPrevHash(), p.dataPool.MetaBlocks(), p.marshalizer, p.storageService) + recursiveHeader, err := process.GetMetaHeader(searchHeader.GetPrevHash(), vs.dataPool.Headers(), vs.marshalizer, vs.storageService) if err != nil { return nil, err } for key, shardData := range missingPreviousShardData { - prevShardData := p.getMatchingPrevShardData(shardData, recursiveHeader.ShardInfo) + prevShardData := vs.getMatchingPrevShardData(shardData, recursiveHeader.ShardInfo) if prevShardData == nil { continue } - p.prevShardInfo[key] = *prevShardData + vs.prevShardInfo[key] = *prevShardData delete(missingPreviousShardData, key) } *searchHeader = *recursiveHeader @@ -545,11 +634,11 @@ func (p *validatorStatistics) loadMissingPrevShardDataFromStorage(missingPreviou return missingPreviousShardData, nil } -func (p *validatorStatistics) loadPreviousShardHeadersMeta(header *block.MetaBlock) error { - p.mutPrevShardInfo.Lock() - defer p.mutPrevShardInfo.Unlock() +func (vs *validatorStatistics) loadPreviousShardHeadersMeta(header *block.MetaBlock) error { + vs.mutPrevShardInfo.Lock() + defer vs.mutPrevShardInfo.Unlock() - metaDataPool, ok := p.dataPool.(dataRetriever.MetaPoolsHolder) + metaDataPool, ok := vs.dataPool.(dataRetriever.MetaPoolsHolder) if !ok { return process.ErrInvalidMetaPoolHolder } @@ -561,16 +650,16 @@ func (p *validatorStatistics) loadPreviousShardHeadersMeta(header *block.MetaBlo previousHeader, err := process.GetShardHeader( shardData.PrevHash, - metaDataPool.ShardHeaders(), - p.marshalizer, - p.storageService, + metaDataPool.Headers(), + vs.marshalizer, + vs.storageService, ) if err != nil { return err } - sdKey := p.buildShardDataKey(shardData) - p.prevShardInfo[sdKey] = block.ShardData{ + sdKey := vs.buildShardDataKey(shardData) + vs.prevShardInfo[sdKey] = block.ShardData{ ShardID: previousHeader.ShardId, Nonce: previousHeader.Nonce, Round: previousHeader.Round, @@ -581,7 +670,7 @@ func (p *validatorStatistics) loadPreviousShardHeadersMeta(header *block.MetaBlo return nil } -func (p *validatorStatistics) getMatchingPrevShardData(currentShardData block.ShardData, shardInfo []block.ShardData) *block.ShardData { +func (vs *validatorStatistics) getMatchingPrevShardData(currentShardData block.ShardData, shardInfo []block.ShardData) *block.ShardData { for _, prevShardData := range shardInfo { if currentShardData.ShardID != prevShardData.ShardID { continue @@ -594,21 +683,43 @@ func (p *validatorStatistics) getMatchingPrevShardData(currentShardData block.Sh return nil } -func (p *validatorStatistics) buildShardDataKey(sh block.ShardData) string { +func (vs *validatorStatistics) buildShardDataKey(sh block.ShardData) string { return fmt.Sprintf("%d_%d", sh.ShardID, sh.Nonce) } -func (p *validatorStatistics) createMediator() shardMetaMediator { - if p.shardCoordinator.SelfId() < sharding.MetachainShardId { - return &shardMediator{p} +func (vs *validatorStatistics) createMediator() shardMetaMediator { + if vs.shardCoordinator.SelfId() < sharding.MetachainShardId { + return 
&shardMediator{ + vs: vs, + } + } + return &metaMediator{ + vs: vs, } - return &metaMediator{p} } // IsInterfaceNil returns true if there is no value under the interface -func (p *validatorStatistics) IsInterfaceNil() bool { - if p == nil { - return true +func (vs *validatorStatistics) IsInterfaceNil() bool { + return vs == nil +} + +func (vs *validatorStatistics) getRating(s string) uint32 { + peer, err := vs.GetPeerAccount([]byte(s)) + if err != nil { + log.Debug("Error getting peer account", "error", err) + return vs.rater.GetStartRating() + } + + return peer.GetRating() +} + +func (vs *validatorStatistics) getTempRating(s string) uint32 { + peer, err := vs.GetPeerAccount([]byte(s)) + + if err != nil { + log.Debug("Error getting peer account", "error", err) + return vs.rater.GetStartRating() } - return false + + return peer.GetTempRating() } diff --git a/process/peer/process_test.go b/process/peer/process_test.go index e252f9b291c..c02a0000d82 100644 --- a/process/peer/process_test.go +++ b/process/peer/process_test.go @@ -3,6 +3,7 @@ package peer_test import ( "errors" "fmt" + "github.com/ElrondNetwork/elrond-go/data" "math/big" "testing" @@ -33,14 +34,25 @@ func CreateMockArguments() peer.ArgValidatorStatisticsProcessor { BurnPercentage: 0.40, }, FeeSettings: config.FeeSettings{ - MaxGasLimitPerBlock: "10000000", - MinGasPrice: "10", - MinGasLimit: "10", + MaxGasLimitPerBlock: "10000000", + MinGasPrice: "10", + MinGasLimit: "10", + GasPerDataByte: "1", + DataLimitForBaseCalc: "10000", }, ValidatorSettings: config.ValidatorSettings{ StakeValue: "500", UnBoundPeriod: "5", }, + RatingSettings: config.RatingSettings{ + StartRating: 5, + MaxRating: 10, + MinRating: 1, + ProposerIncreaseRatingStep: 2, + ProposerDecreaseRatingStep: 4, + ValidatorIncreaseRatingStep: 1, + ValidatorDecreaseRatingStep: 2, + }, }, ) @@ -53,11 +65,17 @@ func CreateMockArguments() peer.ArgValidatorStatisticsProcessor { ShardCoordinator: mock.NewOneShardCoordinatorMock(), AdrConv: &mock.AddressConverterMock{}, PeerAdapter: getAccountsMock(), - Economics: economicsData, + StakeValue: economicsData.StakeValue(), + Rater: createMockRater(), } return arguments } +func createMockRater() *mock.RaterMock { + rater := mock.GetNewMockRater() + return rater +} + func TestNewValidatorStatisticsProcessor_NilPeerAdaptersShouldErr(t *testing.T) { t.Parallel() @@ -152,7 +170,7 @@ func TestValidatorStatisticsProcessor_SaveInitialStateErrOnInvalidNode(t *testin validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) initialNodes := []*sharding.InitialNode{{PubKey: "", Address: ""}} - err := validatorStatistics.SaveInitialState(initialNodes, big.NewInt(100)) + err := validatorStatistics.SaveInitialState(initialNodes, big.NewInt(100), uint32(5)) assert.Equal(t, process.ErrInvalidInitialNodesState, err) } @@ -515,12 +533,8 @@ func TestValidatorStatisticsProcessor_UpdatePeerStateGetHeaderError(t *testing.T arguments := CreateMockArguments() arguments.Marshalizer = marshalizer arguments.DataPool = &mock.MetaPoolsHolderFake{ - MetaBlocksCalled: func() storage.Cacher { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false - }, - } + ShardHeadersCalled: func() dataRetriever.HeadersPool { + return &mock.HeadersCacherStub{} }, } arguments.StorageService = &mock.ChainStorerMock{ @@ -544,6 +558,7 @@ func TestValidatorStatisticsProcessor_UpdatePeerStateGetHeaderError(t *testing.T }, } arguments.PeerAdapter = adapter + arguments.Rater = mock.GetNewMockRater() 
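// Illustrative sketch (not part of the diff): why the tests above now have to set
// arguments.Rater. The getRating/getTempRating methods introduced in this change
// answer with the rater's start rating whenever the peer account cannot be loaded,
// so a Rater must always be wired in. The starterRater interface below is a reduced
// stand-in; only GetStartRating is confirmed by the code in this change set.
package main

import (
	"errors"
	"fmt"
)

type starterRater interface {
	GetStartRating() uint32
}

type fixedStartRater struct{ start uint32 }

func (r fixedStartRater) GetStartRating() uint32 { return r.start }

// ratingOrStart mirrors the control flow of validatorStatistics.getRating:
// a failed lookup is logged by the real code and answered with the start rating.
func ratingOrStart(lookup func(string) (uint32, error), rater starterRater, pk string) uint32 {
	rating, err := lookup(pk)
	if err != nil {
		return rater.GetStartRating()
	}
	return rating
}

func main() {
	rater := fixedStartRater{start: 5}
	missing := func(string) (uint32, error) { return 0, errors.New("account not found") }
	fmt.Println(ratingOrStart(missing, rater, "unknown-pk")) // 5
}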
validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) header := getMetaHeaderHandler([]byte("header")) @@ -572,12 +587,8 @@ func TestValidatorStatisticsProcessor_UpdatePeerStateGetHeaderUnmarshalError(t * arguments := CreateMockArguments() arguments.Marshalizer = marshalizer arguments.DataPool = &mock.MetaPoolsHolderFake{ - MetaBlocksCalled: func() storage.Cacher { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false - }, - } + ShardHeadersCalled: func() dataRetriever.HeadersPool { + return &mock.HeadersCacherStub{} }, } arguments.StorageService = &mock.ChainStorerMock{ @@ -601,6 +612,8 @@ func TestValidatorStatisticsProcessor_UpdatePeerStateGetHeaderUnmarshalError(t * }, } arguments.PeerAdapter = adapter + arguments.Rater = mock.GetNewMockRater() + validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) header := getMetaHeaderHandler([]byte("header")) @@ -639,12 +652,8 @@ func TestValidatorStatisticsProcessor_UpdatePeerStateCallsIncrease(t *testing.T) arguments.InitialNodes = nil arguments.Marshalizer = marshalizer arguments.DataPool = &mock.MetaPoolsHolderFake{ - MetaBlocksCalled: func() storage.Cacher { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false - }, - } + ShardHeadersCalled: func() dataRetriever.HeadersPool { + return &mock.HeadersCacherStub{} }, } arguments.StorageService = &mock.ChainStorerMock{ @@ -668,6 +677,7 @@ func TestValidatorStatisticsProcessor_UpdatePeerStateCallsIncrease(t *testing.T) }, } arguments.PeerAdapter = adapter + arguments.Rater = mock.GetNewMockRater() validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) header := getMetaHeaderHandler([]byte("header")) @@ -713,12 +723,8 @@ func TestValidatorStatisticsProcessor_UpdatePeerStateCheckForMissedBlocksErr(t * arguments := CreateMockArguments() arguments.DataPool = &mock.MetaPoolsHolderFake{ - MetaBlocksCalled: func() storage.Cacher { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false - }, - } + ShardHeadersCalled: func() dataRetriever.HeadersPool { + return &mock.HeadersCacherStub{} }, } arguments.StorageService = &mock.ChainStorerMock{ @@ -743,6 +749,7 @@ func TestValidatorStatisticsProcessor_UpdatePeerStateCheckForMissedBlocksErr(t * } arguments.PeerAdapter = adapter arguments.Marshalizer = marshalizer + arguments.Rater = mock.GetNewMockRater() validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) @@ -822,7 +829,7 @@ func TestValidatorStatisticsProcessor_CheckForMissedBlocksErrOnComputeValidatorL arguments.ShardCoordinator = shardCoordinatorMock arguments.AdrConv = &mock.AddressConverterMock{} arguments.PeerAdapter = getAccountsMock() - + arguments.Rater = mock.GetNewMockRater() validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) err := validatorStatistics.CheckForMissedBlocks(2, 0, []byte("prev"), 0) assert.Equal(t, computeErr, err) @@ -852,7 +859,7 @@ func TestValidatorStatisticsProcessor_CheckForMissedBlocksErrOnGetPeerAcc(t *tes }, } arguments.PeerAdapter = getAccountsMock() - + arguments.Rater = mock.GetNewMockRater() validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) err := validatorStatistics.CheckForMissedBlocks(2, 0, []byte("prev"), 0) assert.Equal(t, peerAccErr, err) @@ -887,7 +894,7 @@ func TestValidatorStatisticsProcessor_CheckForMissedBlocksErrOnDecrease(t *testi }, } arguments.PeerAdapter = peerAdapter - + 
arguments.Rater = mock.GetNewMockRater() validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) err := validatorStatistics.CheckForMissedBlocks(2, 0, []byte("prev"), 0) assert.Equal(t, decreaseErr, err) @@ -926,7 +933,7 @@ func TestValidatorStatisticsProcessor_CheckForMissedBlocksCallsDecrease(t *testi }, } arguments.PeerAdapter = peerAdapter - + arguments.Rater = mock.GetNewMockRater() validatorStatistics, _ := peer.NewValidatorStatisticsProcessor(arguments) _ = validatorStatistics.CheckForMissedBlocks(uint64(currentHeaderRound), uint64(previousHeaderRound), []byte("prev"), 0) assert.Equal(t, currentHeaderRound-previousHeaderRound-1, decreaseCount) @@ -1027,10 +1034,10 @@ func TestValidatorStatisticsProcessor_LoadPreviousShardHeadersMeta(t *testing.T) arguments := CreateMockArguments() arguments.DataPool = &mock.MetaPoolsHolderFake{ - ShardHeadersCalled: func() storage.Cacher { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return prevHeader, true + ShardHeadersCalled: func() dataRetriever.HeadersPool { + return &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (handler data.HeaderHandler, e error) { + return prevHeader, nil }, } }, @@ -1059,10 +1066,10 @@ func TestValidatorStatisticsProcessor_LoadPreviousShardHeadersLoadsMissingFromSt storageHeader := &block.MetaBlock{Nonce: 2, ShardInfo: []block.ShardData{sd1}} arguments.DataPool = &mock.MetaPoolsHolderFake{ - MetaBlocksCalled: func() storage.Cacher { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return storageHeader, true + ShardHeadersCalled: func() dataRetriever.HeadersPool { + return &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (handler data.HeaderHandler, e error) { + return storageHeader, nil }, } }, @@ -1088,12 +1095,8 @@ func TestValidatorStatisticsProcessor_LoadPreviousShardHeadersErrForStorage(t *t prevHeader := &block.MetaBlock{Nonce: 3, ShardInfo: []block.ShardData{}} arguments.DataPool = &mock.MetaPoolsHolderFake{ - MetaBlocksCalled: func() storage.Cacher { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false - }, - } + ShardHeadersCalled: func() dataRetriever.HeadersPool { + return &mock.HeadersCacherStub{} }, } @@ -1126,10 +1129,10 @@ func TestValidatorStatisticsProcessor_LoadPreviousShardHeadersErrIfStillMissing( storageHeader := &block.MetaBlock{Nonce: 1, ShardInfo: []block.ShardData{}} arguments.DataPool = &mock.MetaPoolsHolderFake{ - MetaBlocksCalled: func() storage.Cacher { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return storageHeader, true + ShardHeadersCalled: func() dataRetriever.HeadersPool { + return &mock.HeadersCacherStub{ + GetHeaderByHashCalled: func(hash []byte) (handler data.HeaderHandler, e error) { + return storageHeader, nil }, } }, diff --git a/process/peer/ratingReader.go b/process/peer/ratingReader.go new file mode 100644 index 00000000000..623bd78b582 --- /dev/null +++ b/process/peer/ratingReader.go @@ -0,0 +1,23 @@ +package peer + +type RatingReader struct { + getRating func(string) uint32 + getRatings func([]string) map[string]uint32 +} + +//GetRating returns the Rating for the specified public key +func (bsr *RatingReader) GetRating(pk string) uint32 { + rating := bsr.getRating(pk) + return rating +} + +//GetRatings gets all the ratings that the current rater has +func (bsr *RatingReader) GetRatings(addresses []string) map[string]uint32 { + 
ratings := bsr.getRatings(addresses) + return ratings +} + +//IsInterfaceNil checks if the underlying object is nil +func (bsr *RatingReader) IsInterfaceNil() bool { + return bsr == nil +} diff --git a/process/rating/NilRatingReader.go b/process/rating/NilRatingReader.go new file mode 100644 index 00000000000..8785f56853f --- /dev/null +++ b/process/rating/NilRatingReader.go @@ -0,0 +1,24 @@ +package rating + +type NilRatingReader struct { +} + +//GetRating gets the rating for the public key +func (*NilRatingReader) GetRating(string) uint32 { + return 1 +} + +//GetRatings gets all the ratings as a map[pk] ratingValue +func (*NilRatingReader) GetRatings(pks []string) map[string]uint32 { + ratingsMap := make(map[string]uint32) + + for _, val := range pks { + ratingsMap[val] = 1 + } + return ratingsMap +} + +//IsInterfaceNil verifies if the interface is nil +func (rr *NilRatingReader) IsInterfaceNil() bool { + return rr == nil +} diff --git a/process/rating/blockSigningRater.go b/process/rating/blockSigningRater.go new file mode 100644 index 00000000000..fa3a425dd0b --- /dev/null +++ b/process/rating/blockSigningRater.go @@ -0,0 +1,100 @@ +package rating + +import ( + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/economics" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// BlockSigningRater defines the behaviour of a struct able to do ratings for validators +type BlockSigningRater struct { + sharding.RatingReader + startRating uint32 + maxRating uint32 + minRating uint32 + proposerIncreaseRatingStep int32 + proposerDecreaseRatingStep int32 + validatorIncreaseRatingStep int32 + validatorDecreaseRatingStep int32 +} + +//NewBlockSigningRater creates a new RaterHandler of Type BlockSigningRater +func NewBlockSigningRater(ratingsData *economics.RatingsData) (*BlockSigningRater, error) { + if ratingsData.MinRating() > ratingsData.MaxRating() { + return nil, process.ErrMaxRatingIsSmallerThanMinRating + } + if ratingsData.MaxRating() < ratingsData.StartRating() || ratingsData.MinRating() > ratingsData.StartRating() { + return nil, process.ErrStartRatingNotBetweenMinAndMax + } + + return &BlockSigningRater{ + startRating: ratingsData.StartRating(), + minRating: ratingsData.MinRating(), + maxRating: ratingsData.MaxRating(), + proposerIncreaseRatingStep: int32(ratingsData.ProposerIncreaseRatingStep()), + proposerDecreaseRatingStep: int32(0 - ratingsData.ProposerDecreaseRatingStep()), + validatorIncreaseRatingStep: int32(ratingsData.ValidatorIncreaseRatingStep()), + validatorDecreaseRatingStep: int32(0 - ratingsData.ValidatorDecreaseRatingStep()), + RatingReader: &NilRatingReader{}, + }, nil +} + +func (bsr *BlockSigningRater) computeRating(ratingStep int32, val uint32) uint32 { + newVal := int64(val) + int64(ratingStep) + if newVal < int64(bsr.minRating) { + return bsr.minRating + } + if newVal > int64(bsr.maxRating) { + return bsr.maxRating + } + + return uint32(newVal) +} + +//GetRating returns the Rating for the specified public key +func (bsr *BlockSigningRater) GetRating(pk string) uint32 { + return bsr.RatingReader.GetRating(pk) +} + +//GetRatings gets all the ratings that the current rater has +func (bsr *BlockSigningRater) GetRatings(addresses []string) map[string]uint32 { + return bsr.RatingReader.GetRatings(addresses) +} + +//SetRatingReader sets the Reader that can read ratings +func (bsr *BlockSigningRater) SetRatingReader(reader sharding.RatingReader) { + if !check.IfNil(reader) { + 
bsr.RatingReader = reader + } +} + +//SetRatingReader sets the Reader that can read ratings +func (bsr *BlockSigningRater) IsInterfaceNil() bool { + return bsr == nil +} + +//GetStartRating gets the StartingRating +func (bsr *BlockSigningRater) GetStartRating() uint32 { + return bsr.startRating +} + +//ComputeIncreaseProposer computes the new rating for the increaseLeader +func (bsr *BlockSigningRater) ComputeIncreaseProposer(val uint32) uint32 { + return bsr.computeRating(bsr.proposerIncreaseRatingStep, val) +} + +//ComputeDecreaseProposer computes the new rating for the decreaseLeader +func (bsr *BlockSigningRater) ComputeDecreaseProposer(val uint32) uint32 { + return bsr.computeRating(bsr.proposerDecreaseRatingStep, val) +} + +//ComputeIncreaseValidator computes the new rating for the increaseValidator +func (bsr *BlockSigningRater) ComputeIncreaseValidator(val uint32) uint32 { + return bsr.computeRating(bsr.validatorIncreaseRatingStep, val) +} + +//ComputeDecreaseValidator computes the new rating for the decreaseValidator +func (bsr *BlockSigningRater) ComputeDecreaseValidator(val uint32) uint32 { + return bsr.computeRating(bsr.validatorDecreaseRatingStep, val) +} diff --git a/process/rating/blockSigningRater_test.go b/process/rating/blockSigningRater_test.go new file mode 100644 index 00000000000..6abdbb119ee --- /dev/null +++ b/process/rating/blockSigningRater_test.go @@ -0,0 +1,283 @@ +package rating_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/process/economics" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/process/rating" + "github.com/stretchr/testify/assert" +) + +const ( + validatorIncreaseRatingStep = uint32(1) + validatorDecreaseRatingStep = uint32(2) + proposerIncreaseRatingStep = uint32(2) + proposerDecreaseRatingStep = uint32(4) + minRating = uint32(1) + maxRating = uint32(10) + startRating = uint32(5) +) + +func createDefaultRatingsData() *economics.RatingsData { + data := config.RatingSettings{ + StartRating: startRating, + MaxRating: maxRating, + MinRating: minRating, + ProposerIncreaseRatingStep: proposerIncreaseRatingStep, + ProposerDecreaseRatingStep: proposerDecreaseRatingStep, + ValidatorIncreaseRatingStep: validatorIncreaseRatingStep, + ValidatorDecreaseRatingStep: validatorDecreaseRatingStep, + } + + ratingsData, _ := economics.NewRatingsData(data) + return ratingsData +} + +func createDefaultRatingReader(ratingsMap map[string]uint32) *mock.RatingReaderMock { + rrm := &mock.RatingReaderMock{ + RatingsMap: ratingsMap, + GetRatingCalled: func(s string) uint32 { + value, ok := ratingsMap[s] + if !ok { + return startRating + } + return value + }, + GetRatingsCalled: func(pks []string) map[string]uint32 { + newMap := make(map[string]uint32) + for k, v := range ratingsMap { + for _, pk := range pks { + if k == pk { + newMap[k] = v + } + } + } + return newMap + }, + } + + return rrm +} + +func setupRater(rd *economics.RatingsData, pk string, initialRating uint32) *rating.BlockSigningRater { + bsr, _ := rating.NewBlockSigningRater(rd) + ratingPk := pk + ratingsMap := make(map[string]uint32) + ratingsMap[ratingPk] = initialRating + rrm := createDefaultRatingReader(ratingsMap) + bsr.SetRatingReader(rrm) + + return bsr +} + +func TestBlockSigningRater_GetRatingWithNotSetRatingReaderShouldReturnStartRating(t *testing.T) { + rd := createDefaultRatingsData() + + bsr, _ := rating.NewBlockSigningRater(rd) + rrm := createDefaultRatingReader(make(map[string]uint32)) + 
bsr.SetRatingReader(rrm) + + rt := bsr.GetRating("test") + + assert.Equal(t, rd.StartRating(), rt) +} + +func TestBlockSigningRater_GetRatingWithUnknownPkShoudReturnStartRating(t *testing.T) { + rd := createDefaultRatingsData() + bsr, _ := rating.NewBlockSigningRater(rd) + + rrm := createDefaultRatingReader(make(map[string]uint32)) + bsr.SetRatingReader(rrm) + + rt := bsr.GetRating("test") + + assert.Equal(t, startRating, rt) +} + +func TestBlockSigningRater_GetRatingsWithAllKnownPeersShouldReturnRatings(t *testing.T) { + rd := createDefaultRatingsData() + bsr, _ := rating.NewBlockSigningRater(rd) + + pk1 := "pk1" + pk2 := "pk2" + + pk1Rating := uint32(4) + pk2Rating := uint32(6) + + ratingsMap := make(map[string]uint32) + ratingsMap[pk1] = pk1Rating + ratingsMap[pk2] = pk2Rating + + rrm := createDefaultRatingReader(ratingsMap) + bsr.SetRatingReader(rrm) + + rt := bsr.GetRatings([]string{pk2, pk1}) + + for pk, val := range rt { + assert.Equal(t, ratingsMap[pk], val) + } +} + +func TestBlockSigningRater_GetRatingsWithNotAllKnownPeersShouldReturnRatings(t *testing.T) { + rd := createDefaultRatingsData() + bsr, _ := rating.NewBlockSigningRater(rd) + + pk1 := "pk1" + pk2 := "pk2" + pk3 := "pk3" + + pk1Rating := uint32(4) + pk2Rating := uint32(6) + + ratingsMap := make(map[string]uint32) + ratingsMap[pk1] = pk1Rating + ratingsMap[pk2] = pk2Rating + + rrm := createDefaultRatingReader(ratingsMap) + bsr.SetRatingReader(rrm) + + rt := bsr.GetRatings([]string{pk2, pk3, pk1}) + + for pk, val := range rt { + assert.Equal(t, ratingsMap[pk], val) + } +} + +func TestBlockSigningRater_GetRatingWithKnownPkShoudReturnSetRating(t *testing.T) { + rd := createDefaultRatingsData() + + bsr, _ := rating.NewBlockSigningRater(rd) + + ratingPk := "test" + ratingValue := uint32(5) + + ratingsMap := make(map[string]uint32) + ratingsMap[ratingPk] = ratingValue + rrd := createDefaultRatingReader(ratingsMap) + bsr.SetRatingReader(rrd) + rt := bsr.GetRating(ratingPk) + + assert.Equal(t, ratingValue, rt) +} + +func TestBlockSigningRater_UpdateRatingsShouldUpdateRatingWhenProposed(t *testing.T) { + pk := "test" + initialRatingValue := uint32(5) + rd := createDefaultRatingsData() + + bsr := setupRater(rd, pk, initialRatingValue) + computedRating := bsr.ComputeIncreaseProposer(initialRatingValue) + + expectedValue := initialRatingValue + proposerIncreaseRatingStep + + assert.Equal(t, expectedValue, computedRating) +} + +func TestBlockSigningRater_UpdateRatingsShouldUpdateRatingWhenValidator(t *testing.T) { + pk := "test" + initialRatingValue := uint32(5) + rd := createDefaultRatingsData() + + bsr := setupRater(rd, pk, initialRatingValue) + + computedRating := bsr.ComputeIncreaseValidator(initialRatingValue) + + expectedValue := initialRatingValue + validatorIncreaseRatingStep + + assert.Equal(t, expectedValue, computedRating) +} + +func TestBlockSigningRater_UpdateRatingsShouldUpdateRatingWhenValidatorButNotAccepted(t *testing.T) { + pk := "test" + initialRatingValue := uint32(5) + rd := createDefaultRatingsData() + + bsr := setupRater(rd, pk, initialRatingValue) + + computedRating := bsr.ComputeDecreaseValidator(initialRatingValue) + + expectedValue := initialRatingValue - validatorDecreaseRatingStep + + assert.Equal(t, expectedValue, computedRating) +} + +func TestBlockSigningRater_UpdateRatingsShouldUpdateRatingWhenProposerButNotAccepted(t *testing.T) { + pk := "test" + initialRatingValue := uint32(5) + rd := createDefaultRatingsData() + + bsr := setupRater(rd, pk, initialRatingValue) + + computedRating := 
bsr.ComputeDecreaseProposer(initialRatingValue) + + expectedValue := initialRatingValue - proposerDecreaseRatingStep + + assert.Equal(t, expectedValue, computedRating) +} + +func TestBlockSigningRater_UpdateRatingsShouldNotIncreaseAboveMaxRating(t *testing.T) { + pk := "test" + initialRatingValue := maxRating - 1 + rd := createDefaultRatingsData() + + bsr := setupRater(rd, pk, initialRatingValue) + computedRating := bsr.ComputeIncreaseProposer(initialRatingValue) + + expectedValue := maxRating + + assert.Equal(t, expectedValue, computedRating) +} + +func TestBlockSigningRater_UpdateRatingsShouldNotDecreaseBelowMinRating(t *testing.T) { + pk := "test" + initialRatingValue := minRating + 1 + rd := createDefaultRatingsData() + + bsr := setupRater(rd, pk, initialRatingValue) + computedRating := bsr.ComputeDecreaseProposer(initialRatingValue) + + expectedValue := minRating + + assert.Equal(t, expectedValue, computedRating) +} + +func TestBlockSigningRater_UpdateRatingsWithMultiplePeersShouldReturnRatings(t *testing.T) { + rd := createDefaultRatingsData() + bsr, _ := rating.NewBlockSigningRater(rd) + + pk1 := "pk1" + pk2 := "pk2" + pk3 := "pk3" + pk4 := "pk4" + + pk1Rating := uint32(4) + pk2Rating := uint32(5) + pk3Rating := uint32(6) + pk4Rating := uint32(7) + + ratingsMap := make(map[string]uint32) + ratingsMap[pk1] = pk1Rating + ratingsMap[pk2] = pk2Rating + ratingsMap[pk3] = pk3Rating + ratingsMap[pk4] = pk4Rating + + rrm := createDefaultRatingReader(ratingsMap) + bsr.SetRatingReader(rrm) + + pk1ComputedRating := bsr.ComputeIncreaseProposer(ratingsMap[pk1]) + pk2ComputedRating := bsr.ComputeDecreaseProposer(ratingsMap[pk2]) + pk3ComputedRating := bsr.ComputeIncreaseValidator(ratingsMap[pk3]) + pk4ComputedRating := bsr.ComputeDecreaseValidator(ratingsMap[pk4]) + + expectedPk1 := ratingsMap[pk1] + proposerIncreaseRatingStep + expectedPk2 := ratingsMap[pk2] - proposerDecreaseRatingStep + expectedPk3 := ratingsMap[pk3] + validatorIncreaseRatingStep + expectedPk4 := ratingsMap[pk4] - validatorDecreaseRatingStep + + assert.Equal(t, expectedPk1, pk1ComputedRating) + assert.Equal(t, expectedPk2, pk2ComputedRating) + assert.Equal(t, expectedPk3, pk3ComputedRating) + assert.Equal(t, expectedPk4, pk4ComputedRating) +} diff --git a/process/scToProtocol/stakingToPeer.go b/process/scToProtocol/stakingToPeer.go index f205ef294a9..08945a47c80 100644 --- a/process/scToProtocol/stakingToPeer.go +++ b/process/scToProtocol/stakingToPeer.go @@ -132,7 +132,7 @@ func (stp *stakingToPeer) UpdateProtocol(body block.Body, nonce uint64) error { return err } - for key := range affectedStates { + for _, key := range affectedStates { blsPubKey := []byte(key) peerAcc, err := stp.getPeerAccount(blsPubKey) if err != nil { @@ -165,7 +165,12 @@ func (stp *stakingToPeer) UpdateProtocol(body block.Body, nonce uint64) error { return err } - return stp.peerState.RemoveAccount(adrSrc) + err = stp.peerState.RemoveAccount(adrSrc) + if err != nil { + return err + } + + continue } var stakingData systemSmartContracts.StakingData @@ -174,12 +179,12 @@ func (stp *stakingToPeer) UpdateProtocol(body block.Body, nonce uint64) error { return err } - err = stp.createPeerChangeData(stakingData, peerAcc, nonce) + err = stp.createPeerChangeData(stakingData, peerAcc, nonce, blsPubKey) if err != nil { return err } - err = stp.updatePeerState(stakingData, peerAcc) + err = stp.updatePeerState(stakingData, peerAcc, blsPubKey) if err != nil { return err } @@ -193,7 +198,7 @@ func (stp *stakingToPeer) peerUnregistered(account *state.PeerAccount, nonce 
uin defer stp.mutPeerChanges.Unlock() actualPeerChange := block.PeerData{ - Address: account.Address, + Address: account.RewardAddress, PublicKey: account.BLSPublicKey, Action: block.PeerDeregistration, TimeStamp: nonce, @@ -212,9 +217,17 @@ func (stp *stakingToPeer) peerUnregistered(account *state.PeerAccount, nonce uin func (stp *stakingToPeer) updatePeerState( stakingData systemSmartContracts.StakingData, account *state.PeerAccount, + blsPubKey []byte, ) error { - if !bytes.Equal(stakingData.Address, account.Address) { - err := account.SetSchnorrPublicKeyWithJournal(stakingData.Address) + if !bytes.Equal(stakingData.Address, account.RewardAddress) { + err := account.SetRewardAddressWithJournal(stakingData.Address) + if err != nil { + return err + } + } + + if !bytes.Equal(blsPubKey, account.BLSPublicKey) { + err := account.SetBLSPublicKeyWithJournal(blsPubKey) if err != nil { return err } @@ -253,22 +266,25 @@ func (stp *stakingToPeer) createPeerChangeData( stakingData systemSmartContracts.StakingData, account *state.PeerAccount, nonce uint64, + blsKey []byte, ) error { stp.mutPeerChanges.Lock() defer stp.mutPeerChanges.Unlock() actualPeerChange := block.PeerData{ - Address: account.Address, + Address: account.RewardAddress, PublicKey: account.BLSPublicKey, Action: 0, TimeStamp: nonce, ValueChange: big.NewInt(0), } - if len(account.BLSPublicKey) == 0 { - actualPeerChange.Action = block.PeerRegistrantion + if len(account.RewardAddress) == 0 { + actualPeerChange.Action = block.PeerRegistration actualPeerChange.TimeStamp = stakingData.StartNonce actualPeerChange.ValueChange.Set(stakingData.StakeValue) + actualPeerChange.Address = stakingData.Address + actualPeerChange.PublicKey = blsKey peerHash, err := core.CalculateHash(stp.marshalizer, stp.hasher, actualPeerChange) if err != nil { @@ -290,7 +306,7 @@ func (stp *stakingToPeer) createPeerChangeData( } if stakingData.StartNonce == nonce { - actualPeerChange.Action = block.PeerRegistrantion + actualPeerChange.Action = block.PeerRegistration } if stakingData.UnStakedNonce == nonce { @@ -307,8 +323,8 @@ func (stp *stakingToPeer) createPeerChangeData( return nil } -func (stp *stakingToPeer) getAllModifiedStates(body block.Body) (map[string]struct{}, error) { - affectedStates := make(map[string]struct{}) +func (stp *stakingToPeer) getAllModifiedStates(body block.Body) ([]string, error) { + affectedStates := make([]string, 0) for _, miniBlock := range body { if miniBlock.Type != block.SmartContractResultBlock { @@ -333,13 +349,13 @@ func (stp *stakingToPeer) getAllModifiedStates(body block.Body) (map[string]stru return nil, process.ErrWrongTypeAssertion } - storageUpdates, err := stp.argParser.GetStorageUpdates(scr.Data) + storageUpdates, err := stp.argParser.GetStorageUpdates(string(scr.Data)) if err != nil { return nil, err } for _, storageUpdate := range storageUpdates { - affectedStates[string(storageUpdate.Offset)] = struct{}{} + affectedStates = append(affectedStates, string(storageUpdate.Offset)) } } } diff --git a/process/scToProtocol/stakingToPeer_test.go b/process/scToProtocol/stakingToPeer_test.go index 8afeb3940b2..caceb46d36b 100644 --- a/process/scToProtocol/stakingToPeer_test.go +++ b/process/scToProtocol/stakingToPeer_test.go @@ -261,9 +261,9 @@ func TestStakingToPeer_UpdateProtocolRemoveAccountShouldReturnNil(t *testing.T) peerState := &mock.AccountsStub{} peerState.GetAccountWithJournalCalled = func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { return &state.PeerAccount{ - Address: 
[]byte("addr"), - BLSPublicKey: []byte("BlsAddr"), - Stake: big.NewInt(100), + RewardAddress: []byte("addr"), + BLSPublicKey: []byte("BlsAddr"), + Stake: big.NewInt(100), }, nil } peerState.RemoveAccountCalled = func(addressContainer state.AddressContainer) error { @@ -287,7 +287,7 @@ func TestStakingToPeer_UpdateProtocolRemoveAccountShouldReturnNil(t *testing.T) assert.Nil(t, err) } -func TestStakingToPeer_UpdateProtocolCannotSetSchnorrPublicKeyShouldErr(t *testing.T) { +func TestStakingToPeer_UpdateProtocolCannotSetRewardAddressShouldErr(t *testing.T) { t.Parallel() currTx := &mock.TxForCurrentBlockStub{} @@ -308,7 +308,7 @@ func TestStakingToPeer_UpdateProtocolCannotSetSchnorrPublicKeyShouldErr(t *testi peerState.GetAccountWithJournalCalled = func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { peerAccount, _ := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) peerAccount.Stake = big.NewInt(100) - peerAccount.Address = []byte("key") + peerAccount.RewardAddress = []byte("key") return peerAccount, nil } @@ -333,7 +333,7 @@ func TestStakingToPeer_UpdateProtocolCannotSetSchnorrPublicKeyShouldErr(t *testi blockBody := createBlockBody() err := stakingToPeer.UpdateProtocol(blockBody, 0) - assert.Equal(t, state.ErrNilSchnorrPublicKey, err) + assert.Equal(t, state.ErrEmptyAddress, err) } func TestStakingToPeer_UpdateProtocolCannotSaveAccountShouldErr(t *testing.T) { @@ -366,13 +366,13 @@ func TestStakingToPeer_UpdateProtocolCannotSaveAccountShouldErr(t *testing.T) { }, }) peerAccount.Stake = big.NewInt(0) - peerAccount.Address = []byte(address) + peerAccount.RewardAddress = []byte(address) return peerAccount, nil } stakingData := systemSmartContracts.StakingData{ StakeValue: big.NewInt(100), - Address: []byte(address), + Address: []byte(address), } marshalizer := &mock.MarshalizerMock{} @@ -432,7 +432,7 @@ func TestStakingToPeer_UpdateProtocolCannotSaveAccountNonceShouldErr(t *testing. 
stakingData := systemSmartContracts.StakingData{ StakeValue: big.NewInt(100), - Address: []byte(address), + Address: []byte(address), } marshalizer := &mock.MarshalizerMock{} @@ -492,7 +492,7 @@ func TestStakingToPeer_UpdateProtocol(t *testing.T) { stakingData := systemSmartContracts.StakingData{ StakeValue: big.NewInt(100), - Address: []byte(address), + Address: []byte(address), } marshalizer := &mock.MarshalizerMock{} @@ -552,7 +552,7 @@ func TestStakingToPeer_UpdateProtocolCannotSaveUnStakedNonceShouldErr(t *testing stakingData := systemSmartContracts.StakingData{ StakeValue: big.NewInt(100), - Address: []byte(address), + Address: []byte(address), } marshalizer := &mock.MarshalizerMock{} @@ -612,7 +612,7 @@ func TestStakingToPeer_UpdateProtocolPeerChangesVerifyPeerChanges(t *testing.T) stakeValue := big.NewInt(100) stakingData := systemSmartContracts.StakingData{ StakeValue: stakeValue, - Address: []byte(address), + Address: []byte(address), } marshalizer := &mock.MarshalizerMock{} @@ -679,7 +679,7 @@ func TestStakingToPeer_VerifyPeerChangesShouldErr(t *testing.T) { stakeValue := big.NewInt(100) stakingData := systemSmartContracts.StakingData{ StakeValue: stakeValue, - Address: []byte(address), + Address: []byte(address), } marshalizer := &mock.MarshalizerMock{} diff --git a/process/smartContract/hooks/blockChainHook.go b/process/smartContract/hooks/blockChainHook.go index 0fa7c9b2e74..50cdce67f0a 100644 --- a/process/smartContract/hooks/blockChainHook.go +++ b/process/smartContract/hooks/blockChainHook.go @@ -13,11 +13,14 @@ import ( "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/hashing/keccak" + "github.com/ElrondNetwork/elrond-go/logger" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" ) +var log = logger.GetOrCreate("process/smartContract/blockChainHook") + // BlockChainHookImpl is a wrapper over AccountsAdapter that satisfy vmcommon.BlockchainHook interface type BlockChainHookImpl struct { accounts state.AccountsAdapter @@ -149,7 +152,9 @@ func (bh *BlockChainHookImpl) GetStorageData(accountAddress []byte, index []byte return nil, err } - return account.DataTrieTracker().RetrieveValue(index) + value, err := account.DataTrieTracker().RetrieveValue(index) + log.Trace("GetStorageData ", "address", accountAddress, "key", index, "value", value, "error", err) + return value, err } // IsCodeEmpty returns if the code is empty diff --git a/process/smartContract/process.go b/process/smartContract/process.go index 55420b4b6fa..218bcfe5be6 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "fmt" "math/big" + "sort" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/check" @@ -244,11 +245,11 @@ func (sc *scProcessor) prepareSmartContractCall(tx data.TransactionHandler, acnt scr, ok := tx.(*smartContractResult.SmartContractResult) isSCRResultFromCrossShardCall := ok && len(scr.Data) > 0 && scr.Data[0] == '@' if isSCRResultFromCrossShardCall { - dataToParse = "callBack" + tx.GetData() + dataToParse = append([]byte("callBack"), tx.GetData()...) 
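// Illustrative sketch (not part of the diff): how prepareSmartContractCall above
// recognises the return leg of a cross-shard call. A smart contract result whose
// Data starts with '@' carries only "@"-separated hex-encoded return values, so the
// literal "callBack" function name is prepended before the argument parser runs.
// The sample payload below is invented for the example.
package main

import "fmt"

func buildDataToParse(scrData []byte) []byte {
	isCallBack := len(scrData) > 0 && scrData[0] == '@'
	if isCallBack {
		return append([]byte("callBack"), scrData...)
	}
	return scrData
}

func main() {
	crossShardResult := []byte("@6f6b@2a") // hypothetical: "@" + hex("ok") + "@" + hex(42)
	fmt.Printf("%s\n", buildDataToParse(crossShardResult)) // callBack@6f6b@2a

	directCall := []byte("transfer@01")
	fmt.Printf("%s\n", buildDataToParse(directCall)) // unchanged
}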
sc.isCallBack = true } - err := sc.argsParser.ParseData(dataToParse) + err := sc.argsParser.ParseData(string(dataToParse)) if err != nil { return err } @@ -514,6 +515,8 @@ func (sc *scProcessor) processVMOutput( return nil, nil, fmt.Errorf(vmOutput.ReturnCode.String()) } + sortVMOutputInsideData(vmOutput) + err = sc.processSCOutputAccounts(vmOutput.OutputAccounts, tx) if err != nil { return nil, nil, err @@ -560,6 +563,23 @@ func (sc *scProcessor) processVMOutput( return scrTxs, consumedFee, nil } +func sortVMOutputInsideData(vmOutput *vmcommon.VMOutput) { + sort.Slice(vmOutput.DeletedAccounts, func(i, j int) bool { + return bytes.Compare(vmOutput.DeletedAccounts[i], vmOutput.DeletedAccounts[j]) < 0 + }) + sort.Slice(vmOutput.TouchedAccounts, func(i, j int) bool { + return bytes.Compare(vmOutput.TouchedAccounts[i], vmOutput.TouchedAccounts[j]) < 0 + }) + sort.Slice(vmOutput.OutputAccounts, func(i, j int) bool { + return bytes.Compare(vmOutput.OutputAccounts[i].Address, vmOutput.OutputAccounts[j].Address) < 0 + }) + for _, outAcc := range vmOutput.OutputAccounts { + sort.Slice(outAcc.StorageUpdates, func(i, j int) bool { + return bytes.Compare(outAcc.StorageUpdates[i].Offset, outAcc.StorageUpdates[j].Offset) < 0 + }) + } +} + func (sc *scProcessor) createSCRsWhenError( tx data.TransactionHandler, returnCode string, @@ -580,7 +600,7 @@ func (sc *scProcessor) createSCRsWhenError( RcvAddr: rcvAddress, SndAddr: tx.GetRecvAddress(), Code: nil, - Data: "@" + hex.EncodeToString([]byte(returnCode)) + "@" + hex.EncodeToString(txHash), + Data: []byte("@" + hex.EncodeToString([]byte(returnCode)) + "@" + hex.EncodeToString(txHash)), TxHash: txHash, } @@ -617,7 +637,7 @@ func (sc *scProcessor) createSmartContractResult( result.RcvAddr = outAcc.Address result.SndAddr = tx.GetRecvAddress() result.Code = outAcc.Code - result.Data = string(outAcc.Data) + sc.argsParser.CreateDataFromStorageUpdate(outAcc.StorageUpdates) + result.Data = append(outAcc.Data, sc.argsParser.CreateDataFromStorageUpdate(outAcc.StorageUpdates)...) result.GasLimit = outAcc.GasLimit result.GasPrice = tx.GetGasPrice() result.TxHash = txHash @@ -671,9 +691,9 @@ func (sc *scProcessor) createSCRForSender( scTx.GasLimit = vmOutput.GasRemaining scTx.GasPrice = tx.GetGasPrice() - scTx.Data = "@" + hex.EncodeToString([]byte(vmOutput.ReturnCode.String())) + scTx.Data = []byte("@" + hex.EncodeToString([]byte(vmOutput.ReturnCode.String()))) for _, retData := range vmOutput.ReturnData { - scTx.Data += "@" + hex.EncodeToString(retData) + scTx.Data = append(scTx.Data, []byte("@"+hex.EncodeToString(retData))...) 
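// Illustrative sketch (not part of the diff): the intent behind sortVMOutputInsideData
// above. VM output can surface accounts and storage updates in map/iteration order,
// so they are sorted with a byte comparison before being applied; every node then
// replays the same state changes in the same order. The local types below are
// reduced stand-ins for the vmcommon structures used in the real code.
package main

import (
	"bytes"
	"fmt"
	"sort"
)

type storageUpdate struct{ Offset, Data []byte }

type outputAccount struct {
	Address        []byte
	StorageUpdates []*storageUpdate
}

func sortOutputAccounts(accounts []*outputAccount) {
	sort.Slice(accounts, func(i, j int) bool {
		return bytes.Compare(accounts[i].Address, accounts[j].Address) < 0
	})
	for _, acc := range accounts {
		sort.Slice(acc.StorageUpdates, func(i, j int) bool {
			return bytes.Compare(acc.StorageUpdates[i].Offset, acc.StorageUpdates[j].Offset) < 0
		})
	}
}

func main() {
	accounts := []*outputAccount{
		{Address: []byte{0x02}, StorageUpdates: []*storageUpdate{{Offset: []byte{0x09}}, {Offset: []byte{0x01}}}},
		{Address: []byte{0x01}},
	}
	sortOutputAccounts(accounts)
	for _, acc := range accounts {
		fmt.Printf("account %x:", acc.Address)
		for _, su := range acc.StorageUpdates {
			fmt.Printf(" key %x", su.Offset)
		}
		fmt.Println()
	}
	// Output order is now stable: account 01 first, then account 02 with keys 01, 09.
}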
} if acntSnd == nil || acntSnd.IsInterfaceNil() { @@ -717,6 +737,8 @@ func (sc *scProcessor) processSCOutputAccounts(outputAccounts []*vmcommon.Output for j := 0; j < len(outAcc.StorageUpdates); j++ { storeUpdate := outAcc.StorageUpdates[j] acc.DataTrieTracker().SaveKeyValue(storeUpdate.Offset, storeUpdate.Data) + + log.Trace("storeUpdate", "acc", outAcc.Address, "key", storeUpdate.Offset, "data", storeUpdate.Data) } if len(outAcc.StorageUpdates) > 0 { @@ -734,7 +756,7 @@ func (sc *scProcessor) processSCOutputAccounts(outputAccounts []*vmcommon.Output return err } - log.Debug("created SC address", "address", hex.EncodeToString(outAcc.Address)) + log.Trace("created SC address", "address", hex.EncodeToString(outAcc.Address)) } // change nonce only if there is a change @@ -883,7 +905,7 @@ func (sc *scProcessor) processSimpleSCR( } if len(scr.Data) > 0 { - storageUpdates, err := sc.argsParser.GetStorageUpdates(scr.Data) + storageUpdates, err := sc.argsParser.GetStorageUpdates(string(scr.Data)) if err != nil { log.Debug("storage updates could not be parsed") } diff --git a/process/smartContract/process_test.go b/process/smartContract/process_test.go index a5a2c054313..13dbe95474b 100644 --- a/process/smartContract/process_test.go +++ b/process/smartContract/process_test.go @@ -350,7 +350,7 @@ func TestScProcessor_DeploySmartContractBadParse(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = generateEmptyByteSlice(addrConverter.AddressLen()) - tx.Data = "data" + tx.Data = []byte("data") tx.Value = big.NewInt(45) acntSrc, _ := createAccounts(tx) @@ -392,7 +392,7 @@ func TestScProcessor_DeploySmartContractRunError(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = generateEmptyByteSlice(addrConverter.AddressLen()) - tx.Data = "data" + tx.Data = []byte("data") tx.Value = big.NewInt(45) acntSrc, _ := createAccounts(tx) @@ -444,7 +444,7 @@ func TestScProcessor_DeploySmartContractWrongTx(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DST") - tx.Data = "data" + tx.Data = []byte("data") tx.Value = big.NewInt(45) acntSrc, _ := createAccounts(tx) @@ -483,7 +483,7 @@ func TestScProcessor_DeploySmartContract(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = generateEmptyByteSlice(addrConverter.AddressLen()) - tx.Data = "data" + tx.Data = []byte("data") tx.Value = big.NewInt(0) acntSrc, _ := createAccounts(tx) @@ -527,7 +527,7 @@ func TestScProcessor_ExecuteSmartContractTransactionNilTx(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DST") - tx.Data = "data" + tx.Data = []byte("data") tx.Value = big.NewInt(45) acntSrc, acntDst := createAccounts(tx) @@ -562,7 +562,7 @@ func TestScProcessor_ExecuteSmartContractTransactionNilAccount(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DST") - tx.Data = "data" + tx.Data = []byte("data") tx.Value = big.NewInt(45) acntSrc, acntDst := createAccounts(tx) @@ -605,7 +605,7 @@ func TestScProcessor_ExecuteSmartContractTransactionBadParser(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DST") - tx.Data = "data" + tx.Data = []byte("data") tx.Value = big.NewInt(45) acntSrc, acntDst := createAccounts(tx) @@ -648,7 +648,7 @@ func TestScProcessor_ExecuteSmartContractTransactionVMRunError(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DST0000000") - tx.Data = "data" + tx.Data = []byte("data") tx.Value = big.NewInt(45) acntSrc, acntDst := createAccounts(tx) @@ -699,7 +699,7 @@ func 
TestScProcessor_ExecuteSmartContractTransaction(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DST0000000") - tx.Data = "data" + tx.Data = []byte("data") tx.Value = big.NewInt(0) acntSrc, acntDst := createAccounts(tx) @@ -739,7 +739,7 @@ func TestScProcessor_CreateVMCallInputWrongCode(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DST") - tx.Data = "data" + tx.Data = []byte("data") tx.Value = big.NewInt(45) tmpError := errors.New("error") @@ -778,7 +778,7 @@ func TestScProcessor_CreateVMCallInput(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DST") - tx.Data = "data" + tx.Data = []byte("data") tx.Value = big.NewInt(45) vmInput, err := sc.CreateVMCallInput(tx) @@ -813,7 +813,7 @@ func TestScProcessor_CreateVMDeployInputBadFunction(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DST") - tx.Data = "data" + tx.Data = []byte("data") tx.Value = big.NewInt(45) tmpError := errors.New("error") @@ -858,7 +858,7 @@ func TestScProcessor_CreateVMDeployInput(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DST") - tx.Data = "data@0000" + tx.Data = []byte("data@0000") tx.Value = big.NewInt(45) vmArg := []byte("00") @@ -899,7 +899,7 @@ func TestScProcessor_CreateVMDeployInputNotEnoughArguments(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DST") - tx.Data = "data@0000" + tx.Data = []byte("data@0000") tx.Value = big.NewInt(45) vmInput, vmType, err := sc.CreateVMDeployInput(tx) @@ -935,7 +935,7 @@ func TestScProcessor_CreateVMInputWrongArgument(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DST") - tx.Data = "data" + tx.Data = []byte("data") tx.Value = big.NewInt(45) tmpError := errors.New("error") @@ -978,7 +978,7 @@ func TestScProcessor_CreateVMInputNotEnoughGas(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DST") - tx.Data = "data" + tx.Data = []byte("data") tx.Value = big.NewInt(45) tx.GasLimit = 100 @@ -1014,7 +1014,7 @@ func TestScProcessor_CreateVMInput(t *testing.T) { tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DST") - tx.Data = "data" + tx.Data = []byte("data") tx.Value = big.NewInt(45) vmInput, err := sc.CreateVMInput(tx) @@ -1027,7 +1027,7 @@ func createAccountsAndTransaction() (*state.Account, *state.Account, *transactio tx.Nonce = 0 tx.SndAddr = []byte("SRC") tx.RcvAddr = []byte("DST") - tx.Data = "data" + tx.Data = []byte("data") tx.Value = big.NewInt(45) acntSrc, acntDst := createAccounts(tx) @@ -2356,7 +2356,7 @@ func TestScProcessor_ProcessSmartContractResultWithData(t *testing.T) { scr := smartContractResult.SmartContractResult{ RcvAddr: []byte("recv address"), - Data: result, + Data: []byte(result), Value: big.NewInt(15), } err = sc.ProcessSmartContractResult(&scr) @@ -2405,7 +2405,7 @@ func TestScProcessor_ProcessSmartContractResultDeploySCShouldError(t *testing.T) scr := smartContractResult.SmartContractResult{ RcvAddr: []byte("recv address"), - Data: "code@06", + Data: []byte("code@06"), Value: big.NewInt(15), } err = sc.ProcessSmartContractResult(&scr) @@ -2470,7 +2470,7 @@ func TestScProcessor_ProcessSmartContractResultExecuteSC(t *testing.T) { scr := smartContractResult.SmartContractResult{ RcvAddr: []byte("recv address"), - Data: "code@06", + Data: []byte("code@06"), Value: big.NewInt(15), } err = sc.ProcessSmartContractResult(&scr) diff --git a/process/smartContract/scQueryService_test.go 
b/process/smartContract/scQueryService_test.go index 385fc689857..d7e9355bd3f 100644 --- a/process/smartContract/scQueryService_test.go +++ b/process/smartContract/scQueryService_test.go @@ -211,7 +211,7 @@ func TestExecuteQuery_ShouldCallRunScSequentially(t *testing.T) { uint64(math.MaxUint64), ) - noOfGoRoutines := 1000 + noOfGoRoutines := 50 wg := sync.WaitGroup{} wg.Add(noOfGoRoutines) for i := 0; i < noOfGoRoutines; i++ { diff --git a/process/sync/baseForkDetector.go b/process/sync/baseForkDetector.go index f093007ff5e..939b79ed553 100644 --- a/process/sync/baseForkDetector.go +++ b/process/sync/baseForkDetector.go @@ -6,11 +6,13 @@ import ( "sync" "github.com/ElrondNetwork/elrond-go/consensus" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/process" ) type headerInfo struct { + epoch uint32 nonce uint64 round uint64 hash []byte @@ -20,16 +22,16 @@ type headerInfo struct { type checkpointInfo struct { nonce uint64 round uint64 + hash []byte } type forkInfo struct { - checkpoint []*checkpointInfo - finalCheckpoint *checkpointInfo - probableHighestNonce uint64 - lastBlockRound uint64 - lastProposedBlockNonce uint64 - shouldForceFork bool - isNotarizedShardStuck bool + checkpoint []*checkpointInfo + finalCheckpoint *checkpointInfo + probableHighestNonce uint64 + highestNonceReceived uint64 + rollBackNonce uint64 + lastRoundWithForcedFork int64 } // baseForkDetector defines a struct with necessary data needed for fork detection @@ -43,6 +45,36 @@ type baseForkDetector struct { blackListHandler process.BlackListHandler genesisTime int64 + blockTracker process.BlockTracker +} + +// SetRollBackNonce sets the nonce where the chain should roll back +func (bfd *baseForkDetector) SetRollBackNonce(nonce uint64) { + bfd.mutFork.Lock() + bfd.fork.rollBackNonce = nonce + bfd.mutFork.Unlock() +} + +func (bfd *baseForkDetector) getRollBackNonce() uint64 { + bfd.mutFork.RLock() + nonce := bfd.fork.rollBackNonce + bfd.mutFork.RUnlock() + + return nonce +} + +func (bfd *baseForkDetector) setLastRoundWithForcedFork(round int64) { + bfd.mutFork.Lock() + bfd.fork.lastRoundWithForcedFork = round + bfd.mutFork.Unlock() +} + +func (bfd *baseForkDetector) lastRoundWithForcedFork() int64 { + bfd.mutFork.RLock() + round := bfd.fork.lastRoundWithForcedFork + bfd.mutFork.RUnlock() + + return round } func (bfd *baseForkDetector) removePastOrInvalidRecords() { @@ -57,6 +89,13 @@ func (bfd *baseForkDetector) checkBlockBasicValidity( state process.BlockHeaderState, ) error { + if check.IfNil(header) { + return ErrNilHeader + } + if headerHash == nil { + return ErrNilHash + } + roundDif := int64(header.GetRound()) - int64(bfd.finalCheckpoint().round) nonceDif := int64(header.GetNonce()) - int64(bfd.finalCheckpoint().nonce) //TODO: Analyze if the acceptance of some headers which came for the next round could generate some attack vectors @@ -85,16 +124,6 @@ func (bfd *baseForkDetector) checkBlockBasicValidity( if roundDif < nonceDif { return ErrHigherNonceInBlock } - if state == process.BHProposed { - if !isRandomSeedValid(header) { - return ErrRandomSeedNotValid - } - } - if state == process.BHReceived || state == process.BHProcessed { - if !isSigned(header) { - return ErrBlockIsNotSigned - } - } return nil } @@ -115,11 +144,9 @@ func (bfd *baseForkDetector) removeInvalidReceivedHeaders() { finalCheckpointRound := bfd.finalCheckpoint().round finalCheckpointNonce := bfd.finalCheckpoint().nonce - var validHdrInfos []*headerInfo - 
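// Illustrative sketch (not part of the diff): the round/nonce consistency rule kept by
// checkBlockBasicValidity above. Producing a block consumes at least one round, so
// relative to the final checkpoint a header can never gain more nonces than rounds;
// when roundDif < nonceDif the detector rejects it (ErrHigherNonceInBlock). The
// checkpoint and header values below are invented for the example.
package main

import "fmt"

type checkpoint struct{ nonce, round uint64 }

func gainsMoreNoncesThanRounds(headerRound, headerNonce uint64, final checkpoint) bool {
	roundDif := int64(headerRound) - int64(final.round)
	nonceDif := int64(headerNonce) - int64(final.nonce)
	return roundDif < nonceDif
}

func main() {
	final := checkpoint{nonce: 5, round: 10}

	// 2 rounds elapsed but 3 new nonces claimed -> rejected.
	fmt.Println(gainsMoreNoncesThanRounds(12, 8, final)) // true

	// 4 rounds elapsed for 2 new nonces -> plausible.
	fmt.Println(gainsMoreNoncesThanRounds(14, 7, final)) // false
}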
bfd.mutHeaders.Lock() for nonce, hdrInfos := range bfd.headers { - validHdrInfos = nil + validHdrInfos := make([]*headerInfo, 0) for i := 0; i < len(hdrInfos); i++ { roundDif := int64(hdrInfos[i].round) - int64(finalCheckpointRound) nonceDif := int64(hdrInfos[i].nonce) - int64(finalCheckpointNonce) @@ -131,7 +158,7 @@ func (bfd *baseForkDetector) removeInvalidReceivedHeaders() { validHdrInfos = append(validHdrInfos, hdrInfos[i]) } - if validHdrInfos == nil { + if len(validHdrInfos) == 0 { delete(bfd.headers, nonce) continue } @@ -147,7 +174,7 @@ func (bfd *baseForkDetector) removePastCheckpoints() { func (bfd *baseForkDetector) removeCheckpointsBehindNonce(nonce uint64) { bfd.mutFork.Lock() - var preservedCheckpoint []*checkpointInfo + preservedCheckpoint := make([]*checkpointInfo, 0) for i := 0; i < len(bfd.fork.checkpoint); i++ { if bfd.fork.checkpoint[i].nonce < nonce { @@ -177,33 +204,43 @@ func (bfd *baseForkDetector) computeProbableHighestNonce() uint64 { return probableHighestNonce } -// RemoveHeaders removes all the stored headers with a given nonce -func (bfd *baseForkDetector) RemoveHeaders(nonce uint64, hash []byte) { +// RemoveHeader removes the stored header with the given nonce and hash +func (bfd *baseForkDetector) RemoveHeader(nonce uint64, hash []byte) { bfd.removeCheckpointWithNonce(nonce) - var preservedHdrInfos []*headerInfo + preservedHdrsInfo := make([]*headerInfo, 0) bfd.mutHeaders.Lock() - hdrInfos := bfd.headers[nonce] - for _, hdrInfoStored := range hdrInfos { - if hdrInfoStored.state != process.BHNotarized { + + hdrsInfo := bfd.headers[nonce] + for _, hdrInfo := range hdrsInfo { + if hdrInfo.state != process.BHNotarized && bytes.Equal(hash, hdrInfo.hash) { continue } - preservedHdrInfos = append(preservedHdrInfos, hdrInfoStored) + preservedHdrsInfo = append(preservedHdrsInfo, hdrInfo) } - if preservedHdrInfos == nil { + if len(preservedHdrsInfo) == 0 { delete(bfd.headers, nonce) } else { - bfd.headers[nonce] = preservedHdrInfos + bfd.headers[nonce] = preservedHdrsInfo } + bfd.mutHeaders.Unlock() + + probableHighestNonce := bfd.computeProbableHighestNonce() + bfd.setProbableHighestNonce(probableHighestNonce) + + log.Debug("forkDetector.RemoveHeader", + "nonce", nonce, + "hash", hash, + "probable highest nonce", probableHighestNonce) } func (bfd *baseForkDetector) removeCheckpointWithNonce(nonce uint64) { bfd.mutFork.Lock() - var preservedCheckpoint []*checkpointInfo + preservedCheckpoint := make([]*checkpointInfo, 0) for i := 0; i < len(bfd.fork.checkpoint); i++ { if bfd.fork.checkpoint[i].nonce == nonce { @@ -215,35 +252,33 @@ func (bfd *baseForkDetector) removeCheckpointWithNonce(nonce uint64) { bfd.fork.checkpoint = preservedCheckpoint bfd.mutFork.Unlock() + + log.Debug("forkDetector.removeCheckpointWithNonce", + "nonce", nonce, + "last check point nonce", bfd.lastCheckpoint().nonce) } // append adds a new header in the slice found in nonce position // it not adds the header if its hash is already stored in the slice -func (bfd *baseForkDetector) append(hdrInfo *headerInfo) { +func (bfd *baseForkDetector) append(hdrInfo *headerInfo) bool { bfd.mutHeaders.Lock() defer bfd.mutHeaders.Unlock() - // Proposed blocks received do not count for fork choice, as they are not valid until the consensus - // is achieved. They should be received afterwards through sync mechanism. 
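// Illustrative sketch (not part of the diff): the keep/drop rule applied by RemoveHeader
// above. Only the entry matching the given hash is removed, and notarized entries are
// never removed; whatever survives stays registered under the same nonce, otherwise the
// nonce slot is deleted and the probable highest nonce is recomputed. The states and
// hashes below are invented for the example.
package main

import (
	"bytes"
	"fmt"
)

type headerState int

const (
	bhProcessed headerState = iota
	bhReceived
	bhNotarized
)

type hdrInfo struct {
	hash  []byte
	state headerState
}

func preserveAfterRemove(entries []hdrInfo, hash []byte) []hdrInfo {
	preserved := make([]hdrInfo, 0)
	for _, e := range entries {
		if e.state != bhNotarized && bytes.Equal(hash, e.hash) {
			continue // the targeted, non-notarized entry is the only one dropped
		}
		preserved = append(preserved, e)
	}
	return preserved
}

func main() {
	entries := []hdrInfo{
		{hash: []byte("aaa"), state: bhProcessed},
		{hash: []byte("aaa"), state: bhNotarized},
		{hash: []byte("bbb"), state: bhReceived},
	}
	for _, e := range preserveAfterRemove(entries, []byte("aaa")) {
		fmt.Printf("%s state=%d\n", e.hash, e.state)
	}
	// The notarized "aaa" entry and the unrelated "bbb" entry survive.
}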
- if hdrInfo.state == process.BHProposed { - bfd.setLastProposedBlockNonce(hdrInfo.nonce) - return - } - hdrInfos := bfd.headers[hdrInfo.nonce] isHdrInfosNilOrEmpty := hdrInfos == nil || len(hdrInfos) == 0 if isHdrInfosNilOrEmpty { bfd.headers[hdrInfo.nonce] = []*headerInfo{hdrInfo} - return + return true } for _, hdrInfoStored := range hdrInfos { if bytes.Equal(hdrInfoStored.hash, hdrInfo.hash) && hdrInfoStored.state == hdrInfo.state { - return + return false } } bfd.headers[hdrInfo.nonce] = append(bfd.headers[hdrInfo.nonce], hdrInfo) + return true } // GetHighestFinalBlockNonce gets the highest nonce of the block which is final and it can not be reverted anymore @@ -251,20 +286,25 @@ func (bfd *baseForkDetector) GetHighestFinalBlockNonce() uint64 { return bfd.finalCheckpoint().nonce } +// GetHighestFinalBlockHash gets the hash of the block which is final and it can not be reverted anymore +func (bfd *baseForkDetector) GetHighestFinalBlockHash() []byte { + return bfd.finalCheckpoint().hash +} + // ProbableHighestNonce gets the probable highest nonce func (bfd *baseForkDetector) ProbableHighestNonce() uint64 { return bfd.probableHighestNonce() } -// ResetProbableHighestNonce resets the probableHighestNonce to checkpoint -func (bfd *baseForkDetector) ResetProbableHighestNonce() { - bfd.setProbableHighestNonce(bfd.lastCheckpoint().nonce) -} - // ResetFork resets the forced fork func (bfd *baseForkDetector) ResetFork() { bfd.cleanupReceivedHeadersHigherThanNonce(bfd.lastCheckpoint().nonce) - bfd.setShouldForceFork(false) + probableHighestNonce := bfd.computeProbableHighestNonce() + bfd.setProbableHighestNonce(probableHighestNonce) + bfd.setLastRoundWithForcedFork(bfd.rounder.Index()) + + log.Debug("forkDetector.ResetFork", + "probable highest nonce", probableHighestNonce) } func (bfd *baseForkDetector) addCheckpoint(checkpoint *checkpointInfo) { @@ -292,10 +332,18 @@ func (bfd *baseForkDetector) setFinalCheckpoint(finalCheckpoint *checkpointInfo) bfd.mutFork.Unlock() } -// RestoreFinalCheckPointToGenesis will set final checkpoint to genesis -func (bfd *baseForkDetector) RestoreFinalCheckPointToGenesis() { +// RestoreToGenesis sets class variables to theirs initial values +func (bfd *baseForkDetector) RestoreToGenesis() { + bfd.mutHeaders.Lock() + bfd.headers = make(map[uint64][]*headerInfo) + bfd.mutHeaders.Unlock() + bfd.mutFork.Lock() - bfd.fork.finalCheckpoint = &checkpointInfo{round: 0, nonce: 0} + checkpoint := &checkpointInfo{} + bfd.fork.checkpoint = []*checkpointInfo{checkpoint} + bfd.fork.finalCheckpoint = checkpoint + bfd.fork.probableHighestNonce = 0 + bfd.fork.highestNonceReceived = 0 bfd.mutFork.Unlock() } @@ -321,60 +369,25 @@ func (bfd *baseForkDetector) probableHighestNonce() uint64 { return probableHighestNonce } -func (bfd *baseForkDetector) setLastBlockRound(round uint64) { - bfd.mutFork.Lock() - bfd.fork.lastBlockRound = round - bfd.mutFork.Unlock() -} - -func (bfd *baseForkDetector) lastBlockRound() uint64 { - bfd.mutFork.RLock() - lastBlockRound := bfd.fork.lastBlockRound - bfd.mutFork.RUnlock() - - return lastBlockRound -} - -func (bfd *baseForkDetector) setLastProposedBlockNonce(nonce uint64) { - bfd.mutFork.Lock() - bfd.fork.lastProposedBlockNonce = nonce - bfd.mutFork.Unlock() -} - -func (bfd *baseForkDetector) lastProposedBlockNonce() uint64 { - bfd.mutFork.RLock() - lastProposedBlockNonce := bfd.fork.lastProposedBlockNonce - bfd.mutFork.RUnlock() - - return lastProposedBlockNonce -} +func (bfd *baseForkDetector) setHighestNonceReceived(nonce uint64) { + if nonce <= 
bfd.highestNonceReceived() { + return + } -func (bfd *baseForkDetector) setShouldForceFork(shouldForceFork bool) { bfd.mutFork.Lock() - bfd.fork.shouldForceFork = shouldForceFork + bfd.fork.highestNonceReceived = nonce bfd.mutFork.Unlock() -} -func (bfd *baseForkDetector) shouldForceFork() bool { - bfd.mutFork.RLock() - shouldForceFork := bfd.fork.shouldForceFork - bfd.mutFork.RUnlock() - - return shouldForceFork + log.Debug("forkDetector.setHighestNonceReceived", + "highest nonce received", nonce) } -func (bfd *baseForkDetector) setIsNotarizedShardStuck(isNotarizedShardStuck bool) { - bfd.mutFork.Lock() - bfd.fork.isNotarizedShardStuck = isNotarizedShardStuck - bfd.mutFork.Unlock() -} - -func (bfd *baseForkDetector) isNotarizedShardStuck() bool { +func (bfd *baseForkDetector) highestNonceReceived() uint64 { bfd.mutFork.RLock() - isNotarizedShardStuck := bfd.fork.isNotarizedShardStuck + highestNonceReceived := bfd.fork.highestNonceReceived bfd.mutFork.RUnlock() - return isNotarizedShardStuck + return highestNonceReceived } // IsInterfaceNil returns true if there is no value under the interface @@ -388,35 +401,47 @@ func (bfd *baseForkDetector) CheckFork() *process.ForkInfo { forkHeaderRound uint64 forkHeaderHash []byte selfHdrInfo *headerInfo + forkHeaderEpoch uint32 ) forkInfo := process.NewForkInfo() - if bfd.shouldForceFork() { + if bfd.isConsensusStuck() { forkInfo.IsDetected = true return forkInfo } + rollBackNonce := bfd.getRollBackNonce() + if rollBackNonce < math.MaxUint64 { + forkInfo.IsDetected = true + forkInfo.Nonce = rollBackNonce + bfd.SetRollBackNonce(math.MaxUint64) + return forkInfo + } + bfd.mutHeaders.Lock() - for nonce, hdrInfos := range bfd.headers { - if len(hdrInfos) == 1 { + for nonce, hdrsInfo := range bfd.headers { + if len(hdrsInfo) == 1 { continue } selfHdrInfo = nil forkHeaderRound = math.MaxUint64 forkHeaderHash = nil + forkHeaderEpoch = getMaxEpochFromHdrsInfo(hdrsInfo) - for i := 0; i < len(hdrInfos); i++ { - if hdrInfos[i].state == process.BHProcessed { - selfHdrInfo = hdrInfos[i] + for i := 0; i < len(hdrsInfo); i++ { + if hdrsInfo[i].state == process.BHProcessed { + selfHdrInfo = hdrsInfo[i] continue } - forkHeaderHash, forkHeaderRound = bfd.computeForkInfo( - hdrInfos[i], + forkHeaderHash, forkHeaderRound, forkHeaderEpoch = bfd.computeForkInfo( + hdrsInfo[i], forkHeaderHash, - forkHeaderRound) + forkHeaderRound, + forkHeaderEpoch, + ) } if selfHdrInfo == nil { @@ -424,7 +449,7 @@ func (bfd *baseForkDetector) CheckFork() *process.ForkInfo { continue } - if bfd.shouldSignalFork(selfHdrInfo, forkHeaderHash, forkHeaderRound) { + if bfd.shouldSignalFork(selfHdrInfo, forkHeaderHash, forkHeaderRound, forkHeaderEpoch) { forkInfo.IsDetected = true if nonce < forkInfo.Nonce { forkInfo.Nonce = nonce @@ -438,43 +463,76 @@ func (bfd *baseForkDetector) CheckFork() *process.ForkInfo { return forkInfo } +func getMaxEpochFromHdrsInfo(hdrInfos []*headerInfo) uint32 { + maxEpoch := uint32(0) + for _, hdrInfo := range hdrInfos { + if hdrInfo.epoch > maxEpoch { + maxEpoch = hdrInfo.epoch + } + } + return maxEpoch +} + func (bfd *baseForkDetector) computeForkInfo( - headerInfo *headerInfo, + hdrInfo *headerInfo, lastForkHash []byte, lastForkRound uint64, -) ([]byte, uint64) { + lastForkEpoch uint32, +) ([]byte, uint64, uint32) { - if headerInfo.state == process.BHReceivedTooLate { - return lastForkHash, lastForkRound + if hdrInfo.state == process.BHReceivedTooLate && bfd.highestNonceReceived() > hdrInfo.nonce { + return lastForkHash, lastForkRound, lastForkEpoch } - 
currentForkRound := headerInfo.round - if headerInfo.state == process.BHNotarized { + currentForkRound := hdrInfo.round + if hdrInfo.state == process.BHNotarized { currentForkRound = process.MinForkRound + } else { + if hdrInfo.epoch < lastForkEpoch { + log.Debug("computeForkInfo: epoch change fork choice") + return lastForkHash, lastForkRound, lastForkEpoch + } } if currentForkRound < lastForkRound { - return headerInfo.hash, currentForkRound + return hdrInfo.hash, currentForkRound, hdrInfo.epoch } lowerHashForSameRound := currentForkRound == lastForkRound && - bytes.Compare(headerInfo.hash, lastForkHash) < 0 + bytes.Compare(hdrInfo.hash, lastForkHash) < 0 if lowerHashForSameRound { - return headerInfo.hash, currentForkRound + return hdrInfo.hash, currentForkRound, hdrInfo.epoch } - return lastForkHash, lastForkRound + return lastForkHash, lastForkRound, lastForkEpoch } func (bfd *baseForkDetector) shouldSignalFork( headerInfo *headerInfo, lastForkHash []byte, lastForkRound uint64, + lastForkEpoch uint32, ) bool { sameHash := bytes.Equal(headerInfo.hash, lastForkHash) + if sameHash { + return false + } + + if lastForkRound != process.MinForkRound { + if headerInfo.epoch > lastForkEpoch { + log.Debug("shouldSignalFork epoch change false") + return false + } + + if headerInfo.epoch < lastForkEpoch { + log.Debug("shouldSignalFork epoch change true") + return true + } + } + higherHashForSameRound := headerInfo.round == lastForkRound && bytes.Compare(headerInfo.hash, lastForkHash) > 0 - shouldSignalFork := !sameHash && (headerInfo.round > lastForkRound || higherHashForSameRound) + shouldSignalFork := headerInfo.round > lastForkRound || higherHashForSameRound return shouldSignalFork } @@ -484,7 +542,6 @@ func (bfd *baseForkDetector) isHeaderReceivedTooLate( state process.BlockHeaderState, finality int64, ) bool { - if state == process.BHProcessed { return false } @@ -497,61 +554,25 @@ func (bfd *baseForkDetector) isHeaderReceivedTooLate( return isHeaderReceivedTooLate } -func (bfd *baseForkDetector) activateForcedForkIfNeeded( - header data.HeaderHandler, - state process.BlockHeaderState, -) { - bfd.activateForcedForkOnConsensusStuckIfNeeded(header, state) - bfd.activateForcedForkOnCrossNotarizedStuckIfNeeded(header, state) -} - -func (bfd *baseForkDetector) activateForcedForkOnConsensusStuckIfNeeded( - header data.HeaderHandler, - state process.BlockHeaderState, -) { - if state != process.BHProposed || bfd.isSyncing() { - return +func (bfd *baseForkDetector) isConsensusStuck() bool { + if bfd.lastRoundWithForcedFork() == bfd.rounder.Index() { + return false } - lastCheckpointRound := bfd.lastCheckpoint().round - lastCheckpointNonce := bfd.lastCheckpoint().nonce - - roundsDifference := int64(header.GetRound()) - int64(lastCheckpointRound) - noncesDifference := int64(header.GetNonce()) - int64(lastCheckpointNonce) - isInProperRound := process.IsInProperRound(bfd.rounder.Index()) - - isConsensusStuck := roundsDifference > process.MaxRoundsWithoutCommittedBlock && - noncesDifference <= 1 && - isInProperRound - - if isConsensusStuck { - bfd.setShouldForceFork(true) + if bfd.isSyncing() { + return false } -} -func (bfd *baseForkDetector) activateForcedForkOnCrossNotarizedStuckIfNeeded( - header data.HeaderHandler, - state process.BlockHeaderState, -) { - if state != process.BHProposed || bfd.isSyncing() { - return + roundsDifference := bfd.rounder.Index() - int64(bfd.lastCheckpoint().round) + if roundsDifference <= process.MaxRoundsWithoutCommittedBlock { + return false } - lastCheckpointNonce := 
bfd.lastCheckpoint().nonce - finalCheckpointNonce := bfd.finalCheckpoint().nonce - - noncesDifference := int64(header.GetNonce()) - int64(lastCheckpointNonce) - noncesWithoutCrossNotarizedDifference := int64(header.GetNonce()) - int64(finalCheckpointNonce) - isInProperRound := process.IsInProperRound(bfd.rounder.Index()) - - isCrossNotarizedStuck := noncesWithoutCrossNotarizedDifference > process.MaxNoncesWithoutCrossNotarized && - noncesDifference <= 1 && - isInProperRound && - !bfd.isNotarizedShardStuck() - - if isCrossNotarizedStuck { - bfd.setShouldForceFork(true) + if !process.IsInProperRound(bfd.rounder.Index()) { + return false } + + return true } func (bfd *baseForkDetector) isSyncing() bool { @@ -577,27 +598,27 @@ func (bfd *baseForkDetector) GetNotarizedHeaderHash(nonce uint64) []byte { func (bfd *baseForkDetector) cleanupReceivedHeadersHigherThanNonce(nonce uint64) { bfd.mutHeaders.Lock() - for hdrNonce, hdrInfos := range bfd.headers { - if hdrNonce <= nonce { + for hdrsNonce, hdrsInfo := range bfd.headers { + if hdrsNonce <= nonce { continue } - preservedHdrInfos := make([]*headerInfo, 0, len(hdrInfos)) + preservedHdrsInfo := make([]*headerInfo, 0) - for _, hdrInfo := range hdrInfos { + for _, hdrInfo := range hdrsInfo { if hdrInfo.state != process.BHNotarized { continue } - preservedHdrInfos = append(preservedHdrInfos, hdrInfo) + preservedHdrsInfo = append(preservedHdrsInfo, hdrInfo) } - if len(preservedHdrInfos) == 0 { - delete(bfd.headers, hdrNonce) + if len(preservedHdrsInfo) == 0 { + delete(bfd.headers, hdrsNonce) continue } - bfd.headers[hdrNonce] = preservedHdrInfos + bfd.headers[hdrsNonce] = preservedHdrsInfo } bfd.mutHeaders.Unlock() } @@ -606,3 +627,68 @@ func (bfd *baseForkDetector) computeGenesisTimeFromHeader(headerHandler data.Hea genesisTime := int64(headerHandler.GetTimeStamp() - headerHandler.GetRound()*uint64(bfd.rounder.TimeDuration().Seconds())) return genesisTime } + +func (bfd *baseForkDetector) addHeader( + header data.HeaderHandler, + headerHash []byte, + state process.BlockHeaderState, + selfNotarizedHeaders []data.HeaderHandler, + selfNotarizedHeadersHashes [][]byte, + doJobOnBHProcessed func(data.HeaderHandler, []byte, []data.HeaderHandler, [][]byte), +) error { + + err := bfd.checkBlockBasicValidity(header, headerHash, state) + if err != nil { + return err + } + + bfd.processReceivedBlock(header, headerHash, state, selfNotarizedHeaders, selfNotarizedHeadersHashes, doJobOnBHProcessed) + return nil +} + +func (bfd *baseForkDetector) processReceivedBlock( + header data.HeaderHandler, + headerHash []byte, + state process.BlockHeaderState, + selfNotarizedHeaders []data.HeaderHandler, + selfNotarizedHeadersHashes [][]byte, + doJobOnBHProcessed func(data.HeaderHandler, []byte, []data.HeaderHandler, [][]byte), +) { + bfd.setHighestNonceReceived(header.GetNonce()) + + if state == process.BHProposed { + return + } + + isHeaderReceivedTooLate := bfd.isHeaderReceivedTooLate(header, state, process.BlockFinality) + if isHeaderReceivedTooLate { + state = process.BHReceivedTooLate + } + + appended := bfd.append(&headerInfo{ + epoch: header.GetEpoch(), + nonce: header.GetNonce(), + round: header.GetRound(), + hash: headerHash, + state: state, + }) + if !appended { + return + } + + if state == process.BHProcessed { + doJobOnBHProcessed(header, headerHash, selfNotarizedHeaders, selfNotarizedHeadersHashes) + } + + probableHighestNonce := bfd.computeProbableHighestNonce() + bfd.setProbableHighestNonce(probableHighestNonce) + + log.Debug("forkDetector.AddHeader", + 
"round", header.GetRound(), + "nonce", header.GetNonce(), + "hash", headerHash, + "state", state, + "probable highest nonce", bfd.probableHighestNonce(), + "last check point nonce", bfd.lastCheckpoint().nonce, + "final check point nonce", bfd.finalCheckpoint().nonce) +} diff --git a/process/sync/baseForkDetector_test.go b/process/sync/baseForkDetector_test.go index d6d8971159b..6d3d6601f16 100644 --- a/process/sync/baseForkDetector_test.go +++ b/process/sync/baseForkDetector_test.go @@ -16,7 +16,12 @@ import ( func TestNewBasicForkDetector_ShouldErrNilRounder(t *testing.T) { t.Parallel() - bfd, err := sync.NewShardForkDetector(nil, &mock.BlackListHandlerStub{}, 0) + bfd, err := sync.NewShardForkDetector( + nil, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) assert.Equal(t, process.ErrNilRounder, err) assert.Nil(t, bfd) } @@ -25,16 +30,40 @@ func TestNewBasicForkDetector_ShouldErrNilBlackListHandler(t *testing.T) { t.Parallel() rounderMock := &mock.RounderMock{RoundIndex: 100} - bfd, err := sync.NewShardForkDetector(rounderMock, nil, 0) + bfd, err := sync.NewShardForkDetector( + rounderMock, + nil, + &mock.BlockTrackerMock{}, + 0, + ) assert.Equal(t, process.ErrNilBlackListHandler, err) assert.Nil(t, bfd) } +func TestNewBasicForkDetector_ShouldErrNilBlockTracker(t *testing.T) { + t.Parallel() + + rounderMock := &mock.RounderMock{RoundIndex: 100} + bfd, err := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + nil, + 0, + ) + assert.Equal(t, process.ErrNilBlockTracker, err) + assert.Nil(t, bfd) +} + func TestNewBasicForkDetector_ShouldWork(t *testing.T) { t.Parallel() rounderMock := &mock.RounderMock{RoundIndex: 100} - bfd, err := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, err := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) assert.Nil(t, err) assert.NotNil(t, bfd) } @@ -48,7 +77,12 @@ func TestBasicForkDetector_CheckBlockValidityShouldErrGenesisTimeMissmatch(t *te incorrectTimeStamp := uint64(genesisTime + int64(roundTimeDuration)*int64(round) - 1) rounderMock := &mock.RounderMock{RoundIndex: 1, RoundTimeDuration: roundTimeDuration} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, genesisTime) + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + genesisTime, + ) err := bfd.CheckBlockValidity(&block.Header{Nonce: 1, Round: round, TimeStamp: incorrectTimeStamp}, []byte("hash"), process.BHProposed) assert.Equal(t, sync.ErrGenesisTimeMissmatch, err) @@ -61,8 +95,13 @@ func TestBasicForkDetector_CheckBlockValidityShouldErrLowerRoundInBlock(t *testi t.Parallel() rounderMock := &mock.RounderMock{RoundIndex: 100} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) - bfd.SetFinalCheckpoint(1, 1) + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) + bfd.SetFinalCheckpoint(1, 1, nil) err := bfd.CheckBlockValidity(&block.Header{PubKeysBitmap: []byte("X")}, []byte("hash"), process.BHProcessed) assert.Equal(t, sync.ErrLowerRoundInBlock, err) } @@ -71,8 +110,13 @@ func TestBasicForkDetector_CheckBlockValidityShouldErrLowerNonceInBlock(t *testi t.Parallel() rounderMock := &mock.RounderMock{RoundIndex: 100} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) - bfd.SetFinalCheckpoint(2, 2) + bfd, _ := sync.NewShardForkDetector( + 
rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) + bfd.SetFinalCheckpoint(2, 2, nil) err := bfd.CheckBlockValidity(&block.Header{Nonce: 1, Round: 3, PubKeysBitmap: []byte("X")}, []byte("hash"), process.BHProcessed) assert.Equal(t, sync.ErrLowerNonceInBlock, err) } @@ -81,7 +125,12 @@ func TestBasicForkDetector_CheckBlockValidityShouldErrHigherRoundInBlock(t *test t.Parallel() rounderMock := &mock.RounderMock{RoundIndex: 0} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) err := bfd.CheckBlockValidity(&block.Header{Nonce: 1, Round: 2, PubKeysBitmap: []byte("X")}, []byte("hash"), process.BHProcessed) assert.Equal(t, sync.ErrHigherRoundInBlock, err) } @@ -90,34 +139,26 @@ func TestBasicForkDetector_CheckBlockValidityShouldErrHigherNonceInBlock(t *test t.Parallel() rounderMock := &mock.RounderMock{RoundIndex: 1} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) err := bfd.CheckBlockValidity(&block.Header{Nonce: 2, Round: 1, PubKeysBitmap: []byte("X")}, []byte("hash"), process.BHProcessed) assert.Equal(t, sync.ErrHigherNonceInBlock, err) } -func TestBasicForkDetector_CheckBlockValidityShouldErrRandomSeedIsNotValid(t *testing.T) { - t.Parallel() - - rounderMock := &mock.RounderMock{RoundIndex: 1} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) - err := bfd.CheckBlockValidity(&block.Header{Nonce: 1, Round: 1}, []byte("hash"), process.BHProposed) - assert.Equal(t, sync.ErrRandomSeedNotValid, err) -} - -func TestBasicForkDetector_CheckBlockValidityShouldErrBlockIsNotSigned(t *testing.T) { - t.Parallel() - - rounderMock := &mock.RounderMock{RoundIndex: 1} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) - err := bfd.CheckBlockValidity(&block.Header{Nonce: 1, Round: 1}, []byte("hash"), process.BHProcessed) - assert.Equal(t, sync.ErrBlockIsNotSigned, err) -} - func TestBasicForkDetector_CheckBlockValidityShouldWork(t *testing.T) { t.Parallel() rounderMock := &mock.RounderMock{RoundIndex: 1} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) err := bfd.CheckBlockValidity(&block.Header{Nonce: 1, Round: 1, PubKeysBitmap: []byte("X")}, []byte("hash"), process.BHProcessed) assert.Nil(t, err) } @@ -130,12 +171,17 @@ func TestBasicForkDetector_RemoveHeadersShouldWork(t *testing.T) { hdr2 := &block.Header{Nonce: 2, Round: 2, PubKeysBitmap: []byte("X")} hash2 := []byte("hash2") rounderMock := &mock.RounderMock{} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) rounderMock.RoundIndex = 1 - _ = bfd.AddHeader(hdr1, hash1, process.BHProcessed, nil, nil, false) + _ = bfd.AddHeader(hdr1, hash1, process.BHProcessed, nil, nil) rounderMock.RoundIndex = 2 - _ = bfd.AddHeader(hdr2, hash2, process.BHProcessed, nil, nil, false) + _ = bfd.AddHeader(hdr2, hash2, process.BHProcessed, nil, nil) hInfos := bfd.GetHeaders(1) assert.Equal(t, 1, len(hInfos)) @@ -143,7 +189,7 @@ func 
TestBasicForkDetector_RemoveHeadersShouldWork(t *testing.T) { hInfos = bfd.GetHeaders(2) assert.Equal(t, 1, len(hInfos)) - bfd.RemoveHeaders(1, hash1) + bfd.RemoveHeader(1, hash1) hInfos = bfd.GetHeaders(1) assert.Nil(t, hInfos) @@ -155,24 +201,25 @@ func TestBasicForkDetector_RemoveHeadersShouldWork(t *testing.T) { func TestBasicForkDetector_CheckForkOnlyOneShardHeaderOnANonceShouldReturnFalse(t *testing.T) { t.Parallel() - rounderMock := &mock.RounderMock{RoundIndex: 100} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + rounderMock := &mock.RounderMock{RoundIndex: 99} + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) _ = bfd.AddHeader( &block.Header{Nonce: 0, PubKeysBitmap: []byte("X")}, []byte("hash1"), process.BHProcessed, nil, - nil, - false, - ) + nil) _ = bfd.AddHeader( &block.Header{Nonce: 1, Round: 1, PubKeysBitmap: []byte("X")}, []byte("hash2"), process.BHProcessed, nil, - nil, - false, - ) + nil) forkInfo := bfd.CheckFork() assert.False(t, forkInfo.IsDetected) assert.Equal(t, uint64(math.MaxUint64), forkInfo.Nonce) @@ -182,15 +229,19 @@ func TestBasicForkDetector_CheckForkOnlyOneShardHeaderOnANonceShouldReturnFalse( func TestBasicForkDetector_CheckForkOnlyReceivedHeadersShouldReturnFalse(t *testing.T) { t.Parallel() - rounderMock := &mock.RounderMock{RoundIndex: 100} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + rounderMock := &mock.RounderMock{RoundIndex: 99} + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) _ = bfd.AddHeader( &block.Header{Nonce: 0, PubKeysBitmap: []byte("X")}, []byte("hash1"), process.BHReceived, nil, nil, - false, ) _ = bfd.AddHeader( &block.Header{Nonce: 1, Round: 1, PubKeysBitmap: []byte("X")}, @@ -198,7 +249,6 @@ func TestBasicForkDetector_CheckForkOnlyReceivedHeadersShouldReturnFalse(t *test process.BHReceived, nil, nil, - false, ) forkInfo := bfd.CheckFork() assert.False(t, forkInfo.IsDetected) @@ -209,15 +259,19 @@ func TestBasicForkDetector_CheckForkOnlyReceivedHeadersShouldReturnFalse(t *test func TestBasicForkDetector_CheckForkOnlyOneShardHeaderOnANonceReceivedAndProcessedShouldReturnFalse(t *testing.T) { t.Parallel() - rounderMock := &mock.RounderMock{RoundIndex: 100} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + rounderMock := &mock.RounderMock{RoundIndex: 99} + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) _ = bfd.AddHeader( &block.Header{Nonce: 0, PubKeysBitmap: []byte("X")}, []byte("hash1"), process.BHProcessed, nil, nil, - false, ) _ = bfd.AddHeader( &block.Header{Nonce: 1, Round: 1, PubKeysBitmap: []byte("X")}, @@ -225,7 +279,6 @@ func TestBasicForkDetector_CheckForkOnlyOneShardHeaderOnANonceReceivedAndProcess process.BHReceived, nil, nil, - false, ) forkInfo := bfd.CheckFork() assert.False(t, forkInfo.IsDetected) @@ -237,21 +290,24 @@ func TestBasicForkDetector_CheckForkMetaHeaderProcessedShouldReturnFalse(t *test t.Parallel() rounderMock := &mock.RounderMock{RoundIndex: 99} - bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewMetaForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) _ = bfd.AddHeader( &block.MetaBlock{Nonce: 1, Round: 3, PubKeysBitmap: []byte("X")}, []byte("hash1"), process.BHProcessed, nil, - nil, - 
false) + nil) _ = bfd.AddHeader( &block.MetaBlock{Nonce: 1, Round: 2, PubKeysBitmap: []byte("X")}, []byte("hash2"), process.BHProcessed, nil, - nil, - false) + nil) forkInfo := bfd.CheckFork() assert.False(t, forkInfo.IsDetected) assert.Equal(t, uint64(math.MaxUint64), forkInfo.Nonce) @@ -262,30 +318,33 @@ func TestBasicForkDetector_CheckForkMetaHeaderProcessedShouldReturnFalseWhenLowe t.Parallel() rounderMock := &mock.RounderMock{} - bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewMetaForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) rounderMock.RoundIndex = 5 _ = bfd.AddHeader( &block.MetaBlock{Nonce: 1, Round: 4, PubKeysBitmap: []byte("X")}, []byte("hash1"), process.BHReceived, nil, - nil, - false) + nil) rounderMock.RoundIndex = 4 _ = bfd.AddHeader( &block.MetaBlock{Nonce: 1, Round: 3, PubKeysBitmap: []byte("X")}, []byte("hash2"), process.BHReceived, nil, - nil, - false) + nil) + rounderMock.RoundIndex = 3 _ = bfd.AddHeader( &block.MetaBlock{Nonce: 1, Round: 2, PubKeysBitmap: []byte("X")}, []byte("hash3"), process.BHProcessed, nil, - nil, - false) + nil) hInfos := bfd.GetHeaders(1) assert.Equal(t, 3, len(hInfos)) @@ -303,29 +362,31 @@ func TestBasicForkDetector_CheckForkMetaHeaderProcessedShouldReturnFalseWhenEqua t.Parallel() rounderMock := &mock.RounderMock{} - bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewMetaForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) rounderMock.RoundIndex = 5 _ = bfd.AddHeader( &block.MetaBlock{Nonce: 1, Round: 4, PubKeysBitmap: []byte("X")}, []byte("hash1"), process.BHProcessed, nil, - nil, - false) + nil) _ = bfd.AddHeader( &block.MetaBlock{Nonce: 1, Round: 4, PubKeysBitmap: []byte("X")}, []byte("hash2"), process.BHReceived, nil, - nil, - false) + nil) _ = bfd.AddHeader( &block.MetaBlock{Nonce: 1, Round: 4, PubKeysBitmap: []byte("X")}, []byte("hash3"), process.BHReceived, nil, - nil, - false) + nil) hInfos := bfd.GetHeaders(1) assert.Equal(t, 3, len(hInfos)) @@ -344,7 +405,12 @@ func TestBasicForkDetector_CheckForkShardHeaderProcessedShouldReturnTrueWhenEqua t.Parallel() rounderMock := &mock.RounderMock{} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) hdr1 := &block.Header{Nonce: 1, Round: 4, PubKeysBitmap: []byte("X")} hash1 := []byte("hash1") @@ -353,16 +419,16 @@ func TestBasicForkDetector_CheckForkShardHeaderProcessedShouldReturnTrueWhenEqua hdr3 := &block.Header{Nonce: 1, Round: 4, PubKeysBitmap: []byte("X")} hash3 := []byte("hash3") - finalHeaders2 := []data.HeaderHandler{ + selfNotarizedHeaders2 := []data.HeaderHandler{ hdr2, } - finalHeadersHashes2 := [][]byte{ + selfNotarizedHeadersHashes2 := [][]byte{ hash2, } - finalHeaders3 := []data.HeaderHandler{ + selfNotarizedHeaders3 := []data.HeaderHandler{ hdr3, } - finalHeadersHashes3 := [][]byte{ + selfNotarizedHeadersHashes3 := [][]byte{ hash3, } @@ -372,22 +438,19 @@ func TestBasicForkDetector_CheckForkShardHeaderProcessedShouldReturnTrueWhenEqua hash1, process.BHProcessed, nil, - nil, - false) + nil) _ = bfd.AddHeader( hdr2, hash2, process.BHNotarized, - finalHeaders2, - finalHeadersHashes2, - false) + selfNotarizedHeaders2, + selfNotarizedHeadersHashes2) _ = bfd.AddHeader( hdr3, hash3, process.BHNotarized, - finalHeaders3, - 
finalHeadersHashes3, - false) + selfNotarizedHeaders3, + selfNotarizedHeadersHashes3) hInfos := bfd.GetHeaders(1) assert.Equal(t, 3, len(hInfos)) @@ -406,29 +469,31 @@ func TestBasicForkDetector_CheckForkMetaHeaderProcessedShouldReturnTrueWhenEqual t.Parallel() rounderMock := &mock.RounderMock{} - bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewMetaForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) rounderMock.RoundIndex = 5 _ = bfd.AddHeader( &block.MetaBlock{Nonce: 1, Round: 4, PubKeysBitmap: []byte("X")}, []byte("hash2"), process.BHProcessed, nil, - nil, - false) + nil) _ = bfd.AddHeader( &block.MetaBlock{Nonce: 1, Round: 4, PubKeysBitmap: []byte("X")}, []byte("hash3"), process.BHReceived, nil, - nil, - false) + nil) _ = bfd.AddHeader( &block.MetaBlock{Nonce: 1, Round: 4, PubKeysBitmap: []byte("X")}, []byte("hash1"), process.BHReceived, nil, - nil, - false) + nil) hInfos := bfd.GetHeaders(1) assert.Equal(t, 3, len(hInfos)) @@ -446,7 +511,12 @@ func TestBasicForkDetector_CheckForkShardHeaderProcessedShouldReturnTrueWhenEqua t.Parallel() rounderMock := &mock.RounderMock{} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) hdr1 := &block.Header{Nonce: 1, Round: 4, PubKeysBitmap: []byte("X")} hash1 := []byte("hash1") @@ -455,16 +525,16 @@ func TestBasicForkDetector_CheckForkShardHeaderProcessedShouldReturnTrueWhenEqua hdr3 := &block.Header{Nonce: 1, Round: 4, PubKeysBitmap: []byte("X")} hash3 := []byte("hash3") - finalHeaders1 := []data.HeaderHandler{ + selfNotarizedHeaders1 := []data.HeaderHandler{ hdr1, } - finalHeadersHashes1 := [][]byte{ + selfNotarizedHeadersHashes1 := [][]byte{ hash1, } - finalHeaders3 := []data.HeaderHandler{ + selfNotarizedHeaders3 := []data.HeaderHandler{ hdr3, } - finalHeadersHashes3 := [][]byte{ + selfNotarizedHeadersHashes3 := [][]byte{ hash3, } @@ -474,22 +544,19 @@ func TestBasicForkDetector_CheckForkShardHeaderProcessedShouldReturnTrueWhenEqua hash2, process.BHProcessed, nil, - nil, - false) + nil) _ = bfd.AddHeader( hdr3, hash3, process.BHNotarized, - finalHeaders3, - finalHeadersHashes3, - false) + selfNotarizedHeaders3, + selfNotarizedHeadersHashes3) _ = bfd.AddHeader( hdr1, hash1, process.BHNotarized, - finalHeaders1, - finalHeadersHashes1, - false) + selfNotarizedHeaders1, + selfNotarizedHeadersHashes1) hInfos := bfd.GetHeaders(1) assert.Equal(t, 3, len(hInfos)) @@ -507,31 +574,33 @@ func TestBasicForkDetector_CheckForkShouldReturnTrue(t *testing.T) { t.Parallel() rounderMock := &mock.RounderMock{} - bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewMetaForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) rounderMock.RoundIndex = 4 _ = bfd.AddHeader( &block.MetaBlock{Nonce: 1, Round: 3, PubKeysBitmap: []byte("X")}, []byte("hash1"), process.BHReceived, nil, - nil, - false) + nil) rounderMock.RoundIndex = 3 _ = bfd.AddHeader( &block.MetaBlock{Nonce: 1, Round: 2, PubKeysBitmap: []byte("X")}, []byte("hash2"), process.BHReceived, nil, - nil, - false) + nil) rounderMock.RoundIndex = 4 _ = bfd.AddHeader( &block.MetaBlock{Nonce: 1, Round: 3, PubKeysBitmap: []byte("X")}, []byte("hash3"), process.BHProcessed, nil, - nil, - false) + nil) hInfos := bfd.GetHeaders(1) assert.Equal(t, 3, len(hInfos)) @@ -555,11 +624,16 @@ 
func TestBasicForkDetector_RemovePastHeadersShouldWork(t *testing.T) { hdr3 := &block.Header{Nonce: 3, PubKeysBitmap: []byte("X")} hash3 := []byte("hash3") rounderMock := &mock.RounderMock{RoundIndex: 100} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) - _ = bfd.AddHeader(hdr1, hash1, process.BHReceived, nil, nil, false) - _ = bfd.AddHeader(hdr2, hash2, process.BHReceived, nil, nil, false) - _ = bfd.AddHeader(hdr3, hash3, process.BHReceived, nil, nil, false) - bfd.SetFinalCheckpoint(4, 4) + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) + _ = bfd.AddHeader(hdr1, hash1, process.BHReceived, nil, nil) + _ = bfd.AddHeader(hdr2, hash2, process.BHReceived, nil, nil) + _ = bfd.AddHeader(hdr3, hash3, process.BHReceived, nil, nil) + bfd.SetFinalCheckpoint(4, 4, nil) bfd.RemovePastHeaders() hInfos := bfd.GetHeaders(3) @@ -584,16 +658,21 @@ func TestBasicForkDetector_RemoveInvalidReceivedHeadersShouldWork(t *testing.T) hdr3 := &block.Header{PubKeysBitmap: []byte("X"), Nonce: 10, Round: 14} hash3 := []byte("hash3") rounderMock := &mock.RounderMock{} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) rounderMock.RoundIndex = 11 - _ = bfd.AddHeader(hdr0, hash0, process.BHReceived, nil, nil, false) + _ = bfd.AddHeader(hdr0, hash0, process.BHReceived, nil, nil) rounderMock.RoundIndex = 13 - _ = bfd.AddHeader(hdr1, hash1, process.BHReceived, nil, nil, false) + _ = bfd.AddHeader(hdr1, hash1, process.BHReceived, nil, nil) rounderMock.RoundIndex = 16 - _ = bfd.AddHeader(hdr2, hash2, process.BHReceived, nil, nil, false) + _ = bfd.AddHeader(hdr2, hash2, process.BHReceived, nil, nil) rounderMock.RoundIndex = 15 - _ = bfd.AddHeader(hdr3, hash3, process.BHReceived, nil, nil, false) - bfd.SetFinalCheckpoint(9, 12) + _ = bfd.AddHeader(hdr3, hash3, process.BHReceived, nil, nil) + bfd.SetFinalCheckpoint(9, 12, nil) bfd.RemoveInvalidReceivedHeaders() hInfos := bfd.GetHeaders(8) @@ -617,12 +696,17 @@ func TestBasicForkDetector_RemoveCheckpointHeaderNonceShouldResetCheckpoint(t *t hdr1 := &block.Header{Nonce: 2, Round: 2, PubKeysBitmap: []byte("X")} hash1 := []byte("hash1") rounderMock := &mock.RounderMock{RoundIndex: 2} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) - _ = bfd.AddHeader(hdr1, hash1, process.BHProcessed, nil, nil, false) + _ = bfd.AddHeader(hdr1, hash1, process.BHProcessed, nil, nil) assert.Equal(t, uint64(2), bfd.LastCheckpointNonce()) - bfd.RemoveHeaders(2, hash1) + bfd.RemoveHeader(2, hash1) assert.Equal(t, uint64(0), bfd.LastCheckpointNonce()) assert.Equal(t, uint64(0), bfd.LastCheckpointRound()) } @@ -631,30 +715,35 @@ func TestBasicForkDetector_GetHighestFinalBlockNonce(t *testing.T) { t.Parallel() rounderMock := &mock.RounderMock{} - bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewMetaForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) hdr1 := &block.MetaBlock{Nonce: 2, Round: 1, PubKeysBitmap: []byte("X")} hash1 := []byte("hash1") rounderMock.RoundIndex = 1 - _ = bfd.AddHeader(hdr1, hash1, process.BHProcessed, nil, nil, false) + _ = bfd.AddHeader(hdr1, hash1, process.BHProcessed, 
nil, nil) assert.Equal(t, uint64(0), bfd.GetHighestFinalBlockNonce()) hdr2 := &block.MetaBlock{Nonce: 3, Round: 3, PubKeysBitmap: []byte("X")} hash2 := []byte("hash2") rounderMock.RoundIndex = 3 - _ = bfd.AddHeader(hdr2, hash2, process.BHProcessed, nil, nil, false) + _ = bfd.AddHeader(hdr2, hash2, process.BHProcessed, nil, nil) assert.Equal(t, uint64(0), bfd.GetHighestFinalBlockNonce()) hdr3 := &block.MetaBlock{Nonce: 4, Round: 4, PubKeysBitmap: []byte("X")} hash3 := []byte("hash3") rounderMock.RoundIndex = 4 - _ = bfd.AddHeader(hdr3, hash3, process.BHProcessed, nil, nil, false) + _ = bfd.AddHeader(hdr3, hash3, process.BHProcessed, nil, nil) assert.Equal(t, uint64(3), bfd.GetHighestFinalBlockNonce()) hdr4 := &block.MetaBlock{Nonce: 6, Round: 5, PubKeysBitmap: []byte("X")} hash4 := []byte("hash4") rounderMock.RoundIndex = 5 - _ = bfd.AddHeader(hdr4, hash4, process.BHProcessed, nil, nil, false) + _ = bfd.AddHeader(hdr4, hash4, process.BHProcessed, nil, nil) assert.Equal(t, uint64(3), bfd.GetHighestFinalBlockNonce()) } @@ -662,7 +751,12 @@ func TestBasicForkDetector_ProbableHighestNonce(t *testing.T) { t.Parallel() rounderMock := &mock.RounderMock{} - bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewMetaForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) rounderMock.RoundIndex = 11 _ = bfd.AddHeader( @@ -670,8 +764,7 @@ func TestBasicForkDetector_ProbableHighestNonce(t *testing.T) { []byte("hash0"), process.BHReceived, nil, - nil, - false) + nil) assert.Equal(t, uint64(8), bfd.ProbableHighestNonce()) rounderMock.RoundIndex = 13 @@ -680,8 +773,7 @@ func TestBasicForkDetector_ProbableHighestNonce(t *testing.T) { []byte("hash1"), process.BHProcessed, nil, - nil, - false) + nil) assert.Equal(t, uint64(9), bfd.ProbableHighestNonce()) rounderMock.RoundIndex = 16 @@ -690,8 +782,7 @@ func TestBasicForkDetector_ProbableHighestNonce(t *testing.T) { []byte("hash2"), process.BHReceived, nil, - nil, - false) + nil) assert.Equal(t, uint64(13), bfd.ProbableHighestNonce()) rounderMock.RoundIndex = 15 @@ -700,8 +791,7 @@ func TestBasicForkDetector_ProbableHighestNonce(t *testing.T) { []byte("hash3"), process.BHProcessed, nil, - nil, - false) + nil) assert.Equal(t, uint64(10), bfd.ProbableHighestNonce()) rounderMock.RoundIndex = 16 @@ -710,8 +800,7 @@ func TestBasicForkDetector_ProbableHighestNonce(t *testing.T) { []byte("hash3"), process.BHReceived, nil, - nil, - false) + nil) assert.Equal(t, uint64(11), bfd.ProbableHighestNonce()) } @@ -719,23 +808,17 @@ func TestShardForkDetector_ShouldAddBlockInForkDetectorShouldWork(t *testing.T) t.Parallel() rounderMock := &mock.RounderMock{RoundIndex: 10} - sfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + sfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, &mock.BlockTrackerMock{}, 0) hdr := &block.Header{Nonce: 1, Round: 1} - receivedTooLate := sfd.IsHeaderReceivedTooLate(hdr, process.BHProcessed, process.ShardBlockFinality) + receivedTooLate := sfd.IsHeaderReceivedTooLate(hdr, process.BHProcessed, process.BlockFinality) assert.False(t, receivedTooLate) - receivedTooLate = sfd.IsHeaderReceivedTooLate(hdr, process.BHReceived, process.ShardBlockFinality) + receivedTooLate = sfd.IsHeaderReceivedTooLate(hdr, process.BHReceived, process.BlockFinality) assert.True(t, receivedTooLate) - receivedTooLate = sfd.IsHeaderReceivedTooLate(hdr, process.BHProposed, process.ShardBlockFinality) - assert.True(t, receivedTooLate) - 
- hdr.Round = uint64(rounderMock.RoundIndex - process.ShardBlockFinality) - receivedTooLate = sfd.IsHeaderReceivedTooLate(hdr, process.BHReceived, process.ShardBlockFinality) - assert.False(t, receivedTooLate) - - receivedTooLate = sfd.IsHeaderReceivedTooLate(hdr, process.BHProposed, process.ShardBlockFinality) + hdr.Round = uint64(rounderMock.RoundIndex - process.BlockFinality) + receivedTooLate = sfd.IsHeaderReceivedTooLate(hdr, process.BHReceived, process.BlockFinality) assert.False(t, receivedTooLate) } @@ -743,16 +826,11 @@ func TestShardForkDetector_ShouldAddBlockInForkDetectorShouldErrLowerRoundInBloc t.Parallel() rounderMock := &mock.RounderMock{RoundIndex: 10} - sfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + sfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, &mock.BlockTrackerMock{}, 0) hdr := &block.Header{Nonce: 1, Round: 1} - hdr.Round = uint64(rounderMock.RoundIndex - process.ShardBlockFinality - 1) - receivedTooLate := sfd.IsHeaderReceivedTooLate(hdr, process.BHReceived, process.ShardBlockFinality) - assert.True(t, receivedTooLate) - - sfd.AddCheckPoint(2, hdr.GetNonce()+process.NonceDifferenceWhenSynced) - sfd.SetProbableHighestNonce(hdr.GetNonce() + process.NonceDifferenceWhenSynced) - receivedTooLate = sfd.IsHeaderReceivedTooLate(hdr, process.BHProposed, process.ShardBlockFinality) + hdr.Round = uint64(rounderMock.RoundIndex - process.BlockFinality - 1) + receivedTooLate := sfd.IsHeaderReceivedTooLate(hdr, process.BHReceived, process.BlockFinality) assert.True(t, receivedTooLate) } @@ -760,23 +838,17 @@ func TestMetaForkDetector_ShouldAddBlockInForkDetectorShouldWork(t *testing.T) { t.Parallel() rounderMock := &mock.RounderMock{RoundIndex: 10} - mfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + mfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, &mock.BlockTrackerMock{}, 0) hdr := &block.MetaBlock{Nonce: 1, Round: 1} - receivedTooLate := mfd.IsHeaderReceivedTooLate(hdr, process.BHProcessed, process.MetaBlockFinality) + receivedTooLate := mfd.IsHeaderReceivedTooLate(hdr, process.BHProcessed, process.BlockFinality) assert.False(t, receivedTooLate) - receivedTooLate = mfd.IsHeaderReceivedTooLate(hdr, process.BHReceived, process.MetaBlockFinality) + receivedTooLate = mfd.IsHeaderReceivedTooLate(hdr, process.BHReceived, process.BlockFinality) assert.True(t, receivedTooLate) - receivedTooLate = mfd.IsHeaderReceivedTooLate(hdr, process.BHProposed, process.MetaBlockFinality) - assert.True(t, true) - - hdr.Round = uint64(rounderMock.RoundIndex - process.MetaBlockFinality) - receivedTooLate = mfd.IsHeaderReceivedTooLate(hdr, process.BHReceived, process.MetaBlockFinality) - assert.False(t, receivedTooLate) - - receivedTooLate = mfd.IsHeaderReceivedTooLate(hdr, process.BHProposed, process.MetaBlockFinality) + hdr.Round = uint64(rounderMock.RoundIndex - process.BlockFinality) + receivedTooLate = mfd.IsHeaderReceivedTooLate(hdr, process.BHReceived, process.BlockFinality) assert.False(t, receivedTooLate) } @@ -784,27 +856,22 @@ func TestMetaForkDetector_ShouldAddBlockInForkDetectorShouldErrLowerRoundInBlock t.Parallel() rounderMock := &mock.RounderMock{RoundIndex: 10} - mfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + mfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, &mock.BlockTrackerMock{}, 0) hdr := &block.MetaBlock{Nonce: 1, Round: 1} - hdr.Round = uint64(rounderMock.RoundIndex - 
process.MetaBlockFinality - 1) - receivedTooLate := mfd.IsHeaderReceivedTooLate(hdr, process.BHReceived, process.MetaBlockFinality) - assert.True(t, receivedTooLate) - - mfd.AddCheckPoint(2, hdr.GetNonce()+process.NonceDifferenceWhenSynced) - mfd.SetProbableHighestNonce(hdr.GetNonce() + process.NonceDifferenceWhenSynced) - receivedTooLate = mfd.IsHeaderReceivedTooLate(hdr, process.BHProposed, process.MetaBlockFinality) + hdr.Round = uint64(rounderMock.RoundIndex - process.BlockFinality - 1) + receivedTooLate := mfd.IsHeaderReceivedTooLate(hdr, process.BHReceived, process.BlockFinality) assert.True(t, receivedTooLate) } -func TestShardForkDetector_AddFinalHeadersShouldNotChangeTheFinalCheckpoint(t *testing.T) { +func TestShardForkDetector_AddNotarizedHeadersShouldNotChangeTheFinalCheckpoint(t *testing.T) { t.Parallel() rounderMock := &mock.RounderMock{RoundIndex: 10} - sfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + sfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, &mock.BlockTrackerMock{}, 0) hdr1 := &block.Header{Nonce: 3, Round: 3} hash1 := []byte("hash1") - hdr2 := &block.Header{Nonce: 1, Round: 1} + hdr2 := &block.Header{Nonce: 3, Round: 3} hash2 := []byte("hash2") hdr3 := &block.Header{Nonce: 4, Round: 5} hash3 := []byte("hash3") @@ -813,108 +880,94 @@ func TestShardForkDetector_AddFinalHeadersShouldNotChangeTheFinalCheckpoint(t *t hashes := make([][]byte, 0) hdrs = append(hdrs, hdr1) hashes = append(hashes, hash1) - sfd.AddFinalHeaders(hdrs, hashes) + + sfd.ReceivedSelfNotarizedHeaders(0, hdrs, hashes) + assert.Equal(t, uint64(0), sfd.FinalCheckpointNonce()) + + _ = sfd.AddHeader(hdr1, hash1, process.BHProcessed, hdrs, hashes) assert.Equal(t, hdr1.Nonce, sfd.FinalCheckpointNonce()) hdrs = make([]data.HeaderHandler, 0) hashes = make([][]byte, 0) hdrs = append(hdrs, hdr2) hashes = append(hashes, hash2) - sfd.AddFinalHeaders(hdrs, hashes) + + sfd.ReceivedSelfNotarizedHeaders(0, hdrs, hashes) assert.Equal(t, hdr1.Nonce, sfd.FinalCheckpointNonce()) + _ = sfd.AddHeader(hdr2, hash2, process.BHProcessed, hdrs, hashes) + assert.Equal(t, hdr2.Nonce, sfd.FinalCheckpointNonce()) + hdrs = make([]data.HeaderHandler, 0) hashes = make([][]byte, 0) hdrs = append(hdrs, hdr3) hashes = append(hashes, hash3) - sfd.AddFinalHeaders(hdrs, hashes) - assert.Equal(t, hdr3.Nonce, sfd.FinalCheckpointNonce()) -} - -func TestBaseForkDetector_ActivateForcedForkIfNeededStateNotProposedShouldNotActivate(t *testing.T) { - t.Parallel() - rounderMock := &mock.RounderMock{} - bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) - - state := process.BHReceived - hdr1 := &block.Header{Nonce: 1, Round: 4, PubKeysBitmap: []byte("X")} + sfd.ReceivedSelfNotarizedHeaders(0, hdrs, hashes) + assert.Equal(t, hdr2.Nonce, sfd.FinalCheckpointNonce()) - bfd.ActivateForcedForkIfNeeded(hdr1, state) - assert.False(t, bfd.ShouldForceFork()) + _ = sfd.AddHeader(hdr3, hash3, process.BHProcessed, hdrs, hashes) + assert.Equal(t, hdr3.Nonce, sfd.FinalCheckpointNonce()) } -func TestBaseForkDetector_ActivateForcedForkIfNeededNotSyncingShouldNotActivate(t *testing.T) { +func TestBaseForkDetector_IsConsensusStuckNotSyncingShouldReturnFalse(t *testing.T) { t.Parallel() rounderMock := &mock.RounderMock{} - bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, &mock.BlockTrackerMock{}, 0) - state := process.BHProposed - hdr1 := &block.Header{Nonce: 1, 
Round: 4, PubKeysBitmap: []byte("X")} + bfd.SetProbableHighestNonce(1) - bfd.ActivateForcedForkIfNeeded(hdr1, state) - assert.False(t, bfd.ShouldForceFork()) + assert.False(t, bfd.IsConsensusStuck()) } -func TestBaseForkDetector_ActivateForcedForkIfNeededDifferencesNotEnoughShouldNotActivate(t *testing.T) { +func TestBaseForkDetector_IsConsensusStuckNoncesDifferencesNotEnoughShouldReturnFalse(t *testing.T) { t.Parallel() rounderMock := &mock.RounderMock{} - bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) - - _ = bfd.AddHeader( - &block.MetaBlock{PubKeysBitmap: []byte("X"), Nonce: 9, Round: 3}, - []byte("hash1"), - process.BHProcessed, - nil, - nil, - false) + bfd, _ := sync.NewMetaForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) - state := process.BHProposed - hdr1 := &block.Header{Nonce: 1, Round: 4, PubKeysBitmap: []byte("X")} - rounderMock.RoundIndex = 5 - bfd.ActivateForcedForkIfNeeded(hdr1, state) - assert.False(t, bfd.ShouldForceFork()) + rounderMock.RoundIndex = 10 + assert.False(t, bfd.IsConsensusStuck()) } -func TestBaseForkDetector_ActivateForcedForkIfNeededShouldActivate(t *testing.T) { +func TestBaseForkDetector_IsConsensusStuckNotInProperRoundShouldReturnFalse(t *testing.T) { t.Parallel() rounderMock := &mock.RounderMock{} - bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) - - bfd.SetFinalCheckpoint(0, 0) - _ = bfd.AddHeader( - &block.MetaBlock{PubKeysBitmap: []byte("X"), Nonce: 0, Round: 28}, - []byte("hash1"), - process.BHProcessed, - nil, - nil, - false) + bfd, _ := sync.NewMetaForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) - // last checkpoint will be (round = 0 , nonce = 0) - // round difference is higher than 20 - // nonce difference is 1 - // round index is divisible by 5 - // => should activate force fork - state := process.BHProposed - hdr1 := &block.Header{Nonce: 1, Round: 29, PubKeysBitmap: []byte("X")} - rounderMock.RoundIndex = 30 - bfd.ActivateForcedForkIfNeeded(hdr1, state) - assert.True(t, bfd.ShouldForceFork()) + rounderMock.RoundIndex = 11 + assert.False(t, bfd.IsConsensusStuck()) } -func TestBaseForkDetector_ResetFork(t *testing.T) { +func TestBaseForkDetector_IsConsensusStuckShouldReturnTrue(t *testing.T) { t.Parallel() rounderMock := &mock.RounderMock{} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewMetaForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) - bfd.SetShouldForceFork(true) - assert.True(t, bfd.ShouldForceFork()) - bfd.ResetFork() - assert.False(t, bfd.ShouldForceFork()) + // last checkpoint will be (round = 0 , nonce = 0) + // round difference is higher than 10 + // round index is divisible by RoundModulusTrigger -> 5 + // => consensus is stuck + rounderMock.RoundIndex = 20 + assert.True(t, bfd.IsConsensusStuck()) } func TestBaseForkDetector_ComputeTimeDuration(t *testing.T) { @@ -928,7 +981,12 @@ func TestBaseForkDetector_ComputeTimeDuration(t *testing.T) { genesisTime := int64(9000) hdrTimeStamp := uint64(10000) hdrRound := uint64(20) - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, genesisTime) + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + genesisTime, + ) hdr1 := &block.Header{Nonce: 1, Round: hdrRound, PubKeysBitmap: []byte("X"), TimeStamp: hdrTimeStamp} diff --git 
a/process/sync/baseSync.go b/process/sync/baseSync.go index 51d908f093a..e46ab381803 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -51,8 +51,7 @@ func (ni *notarizedInfo) reset() { } type baseBootstrap struct { - headers storage.Cacher - headersNonces dataRetriever.Uint64SyncMapCacher + headers dataRetriever.HeadersPool blkc data.ChainHandler blkExecutor process.BlockProcessor @@ -94,7 +93,7 @@ type baseBootstrap struct { uint64Converter typeConverters.Uint64ByteSliceConverter requestsWithTimeout uint32 - requestMiniBlocks func(uint32, uint64) + requestMiniBlocks func(headerHandler data.HeaderHandler) networkWatcher process.NetworkConnectionWatcher getHeaderFromPool func([]byte) (data.HeaderHandler, error) @@ -142,6 +141,10 @@ func (boot *baseBootstrap) requestedHeaderHash() []byte { } func (boot *baseBootstrap) processReceivedHeader(headerHandler data.HeaderHandler, headerHash []byte) { + if boot.shardCoordinator.SelfId() != headerHandler.GetShardID() { + return + } + log.Trace("received header from network", "shard", headerHandler.GetShardID(), "round", headerHandler.GetRound(), @@ -149,72 +152,54 @@ func (boot *baseBootstrap) processReceivedHeader(headerHandler data.HeaderHandle "hash", headerHash, ) - err := boot.forkDetector.AddHeader(headerHandler, headerHash, process.BHReceived, nil, nil, false) + err := boot.forkDetector.AddHeader(headerHandler, headerHash, process.BHReceived, nil, nil) if err != nil { log.Debug("forkDetector.AddHeader", "error", err.Error()) } - boot.mutRcvHdrHash.Lock() - hash := boot.requestedHeaderHash() - if hash == nil { - boot.mutRcvHdrHash.Unlock() - return - } + go boot.requestMiniBlocks(headerHandler) + + boot.confirmHeaderReceivedByNonce(headerHandler, headerHash) + boot.confirmHeaderReceivedByHash(headerHandler, headerHash) +} - if bytes.Equal(hash, headerHash) { +func (boot *baseBootstrap) confirmHeaderReceivedByNonce(headerHandler data.HeaderHandler, hdrHash []byte) { + boot.mutRcvHdrNonce.Lock() + n := boot.requestedHeaderNonce() + if n != nil && *n == headerHandler.GetNonce() { log.Debug("received requested header from network", "shard", headerHandler.GetShardID(), "round", headerHandler.GetRound(), "nonce", headerHandler.GetNonce(), - "hash", hash, + "hash", hdrHash, ) - boot.setRequestedHeaderHash(nil) - boot.mutRcvHdrHash.Unlock() - boot.chRcvHdrHash <- true - } else { - boot.mutRcvHdrHash.Unlock() - } -} + boot.setRequestedHeaderNonce(nil) + boot.mutRcvHdrNonce.Unlock() + boot.chRcvHdrNonce <- true -// receivedHeaderNonce method is a call back function which is called when a new header is added -// in the block headers pool -func (boot *baseBootstrap) receivedHeaderNonce(nonce uint64, shardId uint32, hash []byte) { - if boot.shardCoordinator.SelfId() != shardId { return } - log.Trace("received header from network", - "shard", shardId, - "nonce", nonce, - "hash", hash, - ) - - err := boot.addReceivedHeaderToForkDetector(hash) - if err != nil { - log.Debug("addReceivedHeaderToForkDetector", "error", err.Error()) - } - - go boot.requestMiniBlocks(shardId, nonce) - - boot.mutRcvHdrNonce.Lock() - n := boot.requestedHeaderNonce() - if n == nil { - boot.mutRcvHdrNonce.Unlock() - return - } + boot.mutRcvHdrNonce.Unlock() +} - if *n == nonce { +func (boot *baseBootstrap) confirmHeaderReceivedByHash(headerHandler data.HeaderHandler, hdrHash []byte) { + boot.mutRcvHdrHash.Lock() + hash := boot.requestedHeaderHash() + if hash != nil && bytes.Equal(hash, hdrHash) { log.Debug("received requested header from network", - "shard", 
shardId, - "nonce", nonce, + "shard", headerHandler.GetShardID(), + "round", headerHandler.GetRound(), + "nonce", headerHandler.GetNonce(), "hash", hash, ) - boot.setRequestedHeaderNonce(nil) - boot.mutRcvHdrNonce.Unlock() - boot.chRcvHdrNonce <- true - } else { - boot.mutRcvHdrNonce.Unlock() + boot.setRequestedHeaderHash(nil) + boot.mutRcvHdrHash.Unlock() + boot.chRcvHdrHash <- true + + return } + boot.mutRcvHdrHash.Unlock() } // AddSyncStateListener adds a syncStateListener that get notified each time the sync status of the node changes @@ -318,20 +303,20 @@ func (boot *baseBootstrap) ShouldSync() bool { } func (boot *baseBootstrap) removeHeaderFromPools(header data.HeaderHandler) []byte { - boot.headersNonces.Remove(header.GetNonce(), header.GetShardID()) - hash, err := core.CalculateHash(boot.marshalizer, boot.hasher, header) if err != nil { log.Debug("CalculateHash", "error", err.Error()) return nil } + boot.headers.RemoveHeaderByHash(hash) + return hash } func (boot *baseBootstrap) cleanCachesAndStorageOnRollback(header data.HeaderHandler) { hash := boot.removeHeaderFromPools(header) - boot.forkDetector.RemoveHeaders(header.GetNonce(), hash) + boot.forkDetector.RemoveHeader(header.GetNonce(), hash) nonceToByteSlice := boot.uint64Converter.ToByteSlice(header.GetNonce()) _ = boot.headerNonceHashStore.Remove(nonceToByteSlice) } @@ -395,28 +380,6 @@ func checkBootstrapNilParameters( return nil } -// isSigned verifies if a block is signed -func isSigned(header data.HeaderHandler) bool { - // TODO: Later, here it should be done a more complex verification (signature for this round matches with the bitmap, - // and validators which signed here, were in this round consensus group) - bitmap := header.GetPubKeysBitmap() - isBitmapEmpty := bytes.Equal(bitmap, make([]byte, len(bitmap))) - - return !isBitmapEmpty -} - -// isRandomSeedValid verifies if the random seed is valid (equal with a signed previous rand seed) -func isRandomSeedValid(header data.HeaderHandler) bool { - // TODO: Later, here should be done a more complex verification (random seed should be equal with the previous rand - // seed signed by the proposer of this round) - prevRandSeed := header.GetPrevRandSeed() - randSeed := header.GetRandSeed() - isPrevRandSeedNilOrEmpty := len(prevRandSeed) == 0 - isRandSeedNilOrEmpty := len(randSeed) == 0 - - return !isPrevRandSeedNilOrEmpty && !isRandSeedNilOrEmpty -} - func (boot *baseBootstrap) requestHeadersFromNonceIfMissing( nonce uint64, haveHeaderInPoolWithNonce func(uint64) bool, @@ -499,19 +462,15 @@ func (boot *baseBootstrap) doJobOnSyncBlockFail(headerHandler data.HeaderHandler if shouldRollBack { boot.requestsWithTimeout = 0 - if headerHandler != nil { + if !check.IfNil(headerHandler) { hash := boot.removeHeaderFromPools(headerHandler) - boot.forkDetector.RemoveHeaders(headerHandler.GetNonce(), hash) + boot.forkDetector.RemoveHeader(headerHandler.GetNonce(), hash) } errNotCritical := boot.rollBack(false) if errNotCritical != nil { log.Debug("rollBack", "error", errNotCritical.Error()) } - - if allowedRequestsWithTimeOutHaveReached && isInProperRound { - boot.forkDetector.ResetProbableHighestNonce() - } } } @@ -617,6 +576,12 @@ func (boot *baseBootstrap) rollBack(revertUsingForkNonce bool) error { if !revertUsingForkNonce && currHeader.GetNonce() <= boot.forkDetector.GetHighestFinalBlockNonce() { return ErrRollBackBehindFinalHeader } + + shouldEndRollBack := revertUsingForkNonce && currHeader.GetNonce() < boot.forkInfo.Nonce + if shouldEndRollBack { + return 
ErrRollBackBehindForkNonce + } + currBlockBody, err := boot.blockBootstrapper.getBlockBody(currHeader) if err != nil { return err @@ -667,6 +632,7 @@ func (boot *baseBootstrap) rollBack(revertUsingForkNonce bool) error { if shouldContinueRollBack { continue } + break } @@ -696,6 +662,19 @@ func (boot *baseBootstrap) rollBackOneBlock( if err != nil { return err } + + // TODO check if pruning should be done on rollback + if boot.accounts.IsPruningEnabled() { + boot.accounts.CancelPrune(prevHeader.GetRootHash()) + + if !bytes.Equal(currHeader.GetRootHash(), prevHeader.GetRootHash()) { + log.Trace("header will be pruned", "root hash", currHeader.GetRootHash()) + errNotCritical := boot.accounts.PruneTrie(currHeader.GetRootHash()) + if errNotCritical != nil { + log.Debug(errNotCritical.Error()) + } + } + } } else { err = boot.setCurrentBlockInfo(nil, nil, nil) if err != nil { @@ -742,7 +721,7 @@ func (boot *baseBootstrap) addReceivedHeaderToForkDetector(hash []byte) error { return err } - err = boot.forkDetector.AddHeader(header, hash, process.BHReceived, nil, nil, false) + err = boot.forkDetector.AddHeader(header, hash, process.BHReceived, nil, nil) if err != nil { return err } @@ -762,7 +741,6 @@ func (boot *baseBootstrap) rollBackOnForcedFork() { log.Debug("rollBack", "error", err.Error()) } - boot.forkDetector.ResetProbableHighestNonce() boot.forkDetector.ResetFork() } diff --git a/process/sync/errors.go b/process/sync/errors.go index 2474f4c663a..3574c34186d 100644 --- a/process/sync/errors.go +++ b/process/sync/errors.go @@ -26,6 +26,9 @@ var ErrHigherRoundInBlock = errors.New("higher round in block") // ErrBlockIsNotSigned signals that the block is not signed var ErrBlockIsNotSigned = errors.New("block is not signed") +//ErrCorruptBootstrapFromStorageDb signals that the bootstrap database is corrupt +var ErrCorruptBootstrapFromStorageDb = errors.New("corrupt bootstrap storage database") + // ErrSignedBlock signals that a block is signed type ErrSignedBlock struct { CurrentNonce uint64 @@ -39,6 +42,9 @@ func (err ErrSignedBlock) Error() string { // ErrRollBackBehindFinalHeader signals that a roll back behind final header has been attempted var ErrRollBackBehindFinalHeader = errors.New("roll back behind final header is not permitted") +// ErrRollBackBehindForkNonce signals that a roll back behind fork nonce is not permitted +var ErrRollBackBehindForkNonce = errors.New("roll back behind fork nonce is not permitted") + // ErrRandomSeedNotValid signals that the random seed is not valid var ErrRandomSeedNotValid = errors.New("random seed is not valid") diff --git a/process/sync/export_test.go b/process/sync/export_test.go index 112c57f572f..5a3e906ccd0 100644 --- a/process/sync/export_test.go +++ b/process/sync/export_test.go @@ -1,13 +1,9 @@ package sync import ( - "bytes" - "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/ElrondNetwork/elrond-go/storage" ) func (boot *ShardBootstrap) RequestHeaderWithNonce(nonce uint64) { @@ -18,12 +14,12 @@ func (boot *ShardBootstrap) GetMiniBlocks(hashes [][]byte) (block.MiniBlockSlice return boot.miniBlocksResolver.GetMiniBlocks(hashes) } -func (boot *MetaBootstrap) ReceivedHeaders(key []byte) { - boot.receivedHeader(key) +func (boot *MetaBootstrap) ReceivedHeaders(header data.HeaderHandler, key []byte) { + boot.processReceivedHeader(header, key) } -func (boot *ShardBootstrap) ReceivedHeaders(key []byte) { 
- boot.receivedHeaders(key) +func (boot *ShardBootstrap) ReceivedHeaders(header data.HeaderHandler, key []byte) { + boot.processReceivedHeader(header, key) } func (boot *ShardBootstrap) RollBack(revertUsingForkNonce bool) error { @@ -58,8 +54,8 @@ func (bfd *baseForkDetector) LastCheckpointRound() uint64 { return bfd.lastCheckpoint().round } -func (bfd *baseForkDetector) SetFinalCheckpoint(nonce uint64, round uint64) { - bfd.setFinalCheckpoint(&checkpointInfo{nonce: nonce, round: round}) +func (bfd *baseForkDetector) SetFinalCheckpoint(nonce uint64, round uint64, hash []byte) { + bfd.setFinalCheckpoint(&checkpointInfo{nonce: nonce, round: round, hash: hash}) } func (bfd *baseForkDetector) FinalCheckpointNonce() uint64 { @@ -86,19 +82,8 @@ func (bfd *baseForkDetector) ComputeProbableHighestNonce() uint64 { return bfd.computeProbableHighestNonce() } -func (bfd *baseForkDetector) ActivateForcedForkIfNeeded( - header data.HeaderHandler, - state process.BlockHeaderState, -) { - bfd.activateForcedForkIfNeeded(header, state) -} - -func (bfd *baseForkDetector) ShouldForceFork() bool { - return bfd.shouldForceFork() -} - -func (bfd *baseForkDetector) SetShouldForceFork(shouldForceFork bool) { - bfd.setShouldForceFork(shouldForceFork) +func (bfd *baseForkDetector) IsConsensusStuck() bool { + return bfd.isConsensusStuck() } func (hi *headerInfo) Hash() []byte { @@ -161,8 +146,8 @@ func (boot *baseBootstrap) ProcessReceivedHeader(headerHandler data.HeaderHandle boot.processReceivedHeader(headerHandler, headerHash) } -func (boot *ShardBootstrap) RequestMiniBlocksFromHeaderWithNonceIfMissing(shardId uint32, nonce uint64) { - boot.requestMiniBlocksFromHeaderWithNonceIfMissing(shardId, nonce) +func (boot *ShardBootstrap) RequestMiniBlocksFromHeaderWithNonceIfMissing(headerHandler data.HeaderHandler) { + boot.requestMiniBlocksFromHeaderWithNonceIfMissing(headerHandler) } func (bfd *baseForkDetector) IsHeaderReceivedTooLate(header data.HeaderHandler, state process.BlockHeaderState, finality int64) bool { @@ -173,40 +158,18 @@ func (bfd *baseForkDetector) SetProbableHighestNonce(nonce uint64) { bfd.setProbableHighestNonce(nonce) } -func (sfd *shardForkDetector) AddFinalHeaders(finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte) { - sfd.addFinalHeaders(finalHeaders, finalHeadersHashes) +func (sfd *shardForkDetector) ComputeFinalCheckpoint() { + sfd.computeFinalCheckpoint() } -func (bfd *baseForkDetector) AddCheckPoint(round uint64, nonce uint64) { - bfd.addCheckpoint(&checkpointInfo{round: round, nonce: nonce}) +func (bfd *baseForkDetector) AddCheckPoint(round uint64, nonce uint64, hash []byte) { + bfd.addCheckpoint(&checkpointInfo{round: round, nonce: nonce, hash: hash}) } func (bfd *baseForkDetector) ComputeGenesisTimeFromHeader(headerHandler data.HeaderHandler) int64 { return bfd.computeGenesisTimeFromHeader(headerHandler) } -func GetCacherWithHeaders( - hdr1 data.HeaderHandler, - hdr2 data.HeaderHandler, - hash1 []byte, - hash2 []byte, -) storage.Cacher { - sds := &mock.CacherStub{ - RegisterHandlerCalled: func(func(key []byte)) {}, - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if bytes.Equal(key, hash1) { - return &hdr1, true - } - if bytes.Equal(key, hash2) { - return &hdr2, true - } - - return nil, false - }, - } - return sds -} - func (boot *baseBootstrap) InitNotarizedMap() map[uint32]*HdrInfo { return make(map[uint32]*HdrInfo, 0) } diff --git a/process/sync/metaForkDetector.go b/process/sync/metaForkDetector.go index c589dc8f33d..b7aae72d057 100644 --- 
a/process/sync/metaForkDetector.go +++ b/process/sync/metaForkDetector.go @@ -1,6 +1,8 @@ package sync import ( + "math" + "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data" @@ -16,6 +18,7 @@ type metaForkDetector struct { func NewMetaForkDetector( rounder consensus.Rounder, blackListHandler process.BlackListHandler, + blockTracker process.BlockTracker, genesisTime int64, ) (*metaForkDetector, error) { @@ -25,17 +28,23 @@ func NewMetaForkDetector( if check.IfNil(blackListHandler) { return nil, process.ErrNilBlackListHandler } + if check.IfNil(blockTracker) { + return nil, process.ErrNilBlockTracker + } bfd := &baseForkDetector{ rounder: rounder, blackListHandler: blackListHandler, genesisTime: genesisTime, + blockTracker: blockTracker, } bfd.headers = make(map[uint64][]*headerInfo) + bfd.fork.checkpoint = make([]*checkpointInfo, 0) checkpoint := &checkpointInfo{} bfd.setFinalCheckpoint(checkpoint) bfd.addCheckpoint(checkpoint) + bfd.fork.rollBackNonce = math.MaxUint64 mfd := metaForkDetector{ baseForkDetector: bfd, @@ -49,47 +58,26 @@ func (mfd *metaForkDetector) AddHeader( header data.HeaderHandler, headerHash []byte, state process.BlockHeaderState, - finalHeaders []data.HeaderHandler, - finalHeadersHashes [][]byte, - isNotarizedShardStuck bool, + selfNotarizedHeaders []data.HeaderHandler, + selfNotarizedHeadersHashes [][]byte, ) error { + return mfd.addHeader( + header, + headerHash, + state, + selfNotarizedHeaders, + selfNotarizedHeadersHashes, + mfd.doJobOnBHProcessed, + ) +} - if check.IfNil(header) { - return ErrNilHeader - } - if headerHash == nil { - return ErrNilHash - } - - err := mfd.checkBlockBasicValidity(header, headerHash, state) - if err != nil { - return err - } - - mfd.activateForcedForkOnConsensusStuckIfNeeded(header, state) - - isHeaderReceivedTooLate := mfd.isHeaderReceivedTooLate(header, state, process.MetaBlockFinality) - if isHeaderReceivedTooLate { - state = process.BHReceivedTooLate - } - - if state == process.BHProcessed { - mfd.setFinalCheckpoint(mfd.lastCheckpoint()) - mfd.addCheckpoint(&checkpointInfo{nonce: header.GetNonce(), round: header.GetRound()}) - mfd.removePastOrInvalidRecords() - mfd.setIsNotarizedShardStuck(isNotarizedShardStuck) - } - - mfd.append(&headerInfo{ - nonce: header.GetNonce(), - round: header.GetRound(), - hash: headerHash, - state: state, - }) - - probableHighestNonce := mfd.computeProbableHighestNonce() - mfd.setLastBlockRound(uint64(mfd.rounder.Index())) - mfd.setProbableHighestNonce(probableHighestNonce) - - return nil +func (mfd *metaForkDetector) doJobOnBHProcessed( + header data.HeaderHandler, + headerHash []byte, + _ []data.HeaderHandler, + _ [][]byte, +) { + mfd.setFinalCheckpoint(mfd.lastCheckpoint()) + mfd.addCheckpoint(&checkpointInfo{nonce: header.GetNonce(), round: header.GetRound(), hash: headerHash}) + mfd.removePastOrInvalidRecords() } diff --git a/process/sync/metaForkDetector_test.go b/process/sync/metaForkDetector_test.go index aa4a9097d72..d72da6dd6a3 100644 --- a/process/sync/metaForkDetector_test.go +++ b/process/sync/metaForkDetector_test.go @@ -13,7 +13,12 @@ import ( func TestNewMetaForkDetector_NilRounderShouldErr(t *testing.T) { t.Parallel() - sfd, err := sync.NewMetaForkDetector(nil, &mock.BlackListHandlerStub{}, 0) + sfd, err := sync.NewMetaForkDetector( + nil, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) assert.Nil(t, sfd) assert.Equal(t, process.ErrNilRounder, err) } @@ -21,15 +26,38 @@ func 
TestNewMetaForkDetector_NilRounderShouldErr(t *testing.T) { func TestNewMetaForkDetector_NilBlackListShouldErr(t *testing.T) { t.Parallel() - sfd, err := sync.NewMetaForkDetector(&mock.RounderMock{}, nil, 0) + sfd, err := sync.NewMetaForkDetector( + &mock.RounderMock{}, + nil, + &mock.BlockTrackerMock{}, + 0, + ) assert.Nil(t, sfd) assert.Equal(t, process.ErrNilBlackListHandler, err) } +func TestNewMetaForkDetector_NilBlockTrackerShouldErr(t *testing.T) { + t.Parallel() + + sfd, err := sync.NewMetaForkDetector( + &mock.RounderMock{}, + &mock.BlackListHandlerStub{}, + nil, + 0, + ) + assert.Nil(t, sfd) + assert.Equal(t, process.ErrNilBlockTracker, err) +} + func TestNewMetaForkDetector_OkParamsShouldWork(t *testing.T) { t.Parallel() - sfd, err := sync.NewMetaForkDetector(&mock.RounderMock{}, &mock.BlackListHandlerStub{}, 0) + sfd, err := sync.NewMetaForkDetector( + &mock.RounderMock{}, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) assert.Nil(t, err) assert.NotNil(t, sfd) @@ -43,8 +71,8 @@ func TestMetaForkDetector_AddHeaderNilHeaderShouldErr(t *testing.T) { t.Parallel() rounderMock := &mock.RounderMock{RoundIndex: 100} - bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) - err := bfd.AddHeader(nil, make([]byte, 0), process.BHProcessed, nil, nil, false) + bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, &mock.BlockTrackerMock{}, 0) + err := bfd.AddHeader(nil, make([]byte, 0), process.BHProcessed, nil, nil) assert.Equal(t, sync.ErrNilHeader, err) } @@ -52,35 +80,20 @@ func TestMetaForkDetector_AddHeaderNilHashShouldErr(t *testing.T) { t.Parallel() rounderMock := &mock.RounderMock{RoundIndex: 100} - bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) - err := bfd.AddHeader(&block.Header{}, nil, process.BHProcessed, nil, nil, false) + bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, &mock.BlockTrackerMock{}, 0) + err := bfd.AddHeader(&block.Header{}, nil, process.BHProcessed, nil, nil) assert.Equal(t, sync.ErrNilHash, err) } -func TestMetaForkDetector_AddHeaderUnsignedBlockShouldErr(t *testing.T) { - t.Parallel() - - rounderMock := &mock.RounderMock{RoundIndex: 1} - bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) - err := bfd.AddHeader( - &block.Header{Nonce: 1, Round: 1}, - make([]byte, 0), - process.BHProcessed, - nil, - nil, - false) - assert.Equal(t, sync.ErrBlockIsNotSigned, err) -} - func TestMetaForkDetector_AddHeaderNotPresentShouldWork(t *testing.T) { t.Parallel() hdr := &block.Header{Nonce: 1, Round: 1, PubKeysBitmap: []byte("X")} hash := make([]byte, 0) rounderMock := &mock.RounderMock{RoundIndex: 1} - bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, &mock.BlockTrackerMock{}, 0) - err := bfd.AddHeader(hdr, hash, process.BHProcessed, nil, nil, false) + err := bfd.AddHeader(hdr, hash, process.BHProcessed, nil, nil) assert.Nil(t, err) hInfos := bfd.GetHeaders(1) @@ -96,10 +109,10 @@ func TestMetaForkDetector_AddHeaderPresentShouldAppend(t *testing.T) { hdr2 := &block.Header{Nonce: 1, Round: 1, PubKeysBitmap: []byte("X")} hash2 := []byte("hash2") rounderMock := &mock.RounderMock{RoundIndex: 1} - bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, &mock.BlockTrackerMock{}, 0) - _ = bfd.AddHeader(hdr1, 
hash1, process.BHProcessed, nil, nil, false) - err := bfd.AddHeader(hdr2, hash2, process.BHProcessed, nil, nil, false) + _ = bfd.AddHeader(hdr1, hash1, process.BHProcessed, nil, nil) + err := bfd.AddHeader(hdr2, hash2, process.BHProcessed, nil, nil) assert.Nil(t, err) hInfos := bfd.GetHeaders(1) @@ -114,8 +127,8 @@ func TestMetaForkDetector_AddHeaderWithProcessedBlockShouldSetCheckpoint(t *test hdr1 := &block.Header{Nonce: 69, Round: 72, PubKeysBitmap: []byte("X")} hash1 := []byte("hash1") rounderMock := &mock.RounderMock{RoundIndex: 73} - bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) - _ = bfd.AddHeader(hdr1, hash1, process.BHProcessed, nil, nil, false) + bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, &mock.BlockTrackerMock{}, 0) + _ = bfd.AddHeader(hdr1, hash1, process.BHProcessed, nil, nil) assert.Equal(t, hdr1.Nonce, bfd.LastCheckpointNonce()) } @@ -126,10 +139,10 @@ func TestMetaForkDetector_AddHeaderPresentShouldNotRewriteState(t *testing.T) { hash := []byte("hash1") hdr2 := &block.Header{Nonce: 1, Round: 1, PubKeysBitmap: []byte("X")} rounderMock := &mock.RounderMock{RoundIndex: 1} - bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, &mock.BlockTrackerMock{}, 0) - _ = bfd.AddHeader(hdr1, hash, process.BHReceived, nil, nil, false) - err := bfd.AddHeader(hdr2, hash, process.BHProcessed, nil, nil, false) + _ = bfd.AddHeader(hdr1, hash, process.BHReceived, nil, nil) + err := bfd.AddHeader(hdr2, hash, process.BHProcessed, nil, nil) assert.Nil(t, err) hInfos := bfd.GetHeaders(1) @@ -143,14 +156,13 @@ func TestMetaForkDetector_AddHeaderHigherNonceThanRoundShouldErr(t *testing.T) { t.Parallel() rounderMock := &mock.RounderMock{RoundIndex: 100} - bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) + bfd, _ := sync.NewMetaForkDetector(rounderMock, &mock.BlackListHandlerStub{}, &mock.BlockTrackerMock{}, 0) err := bfd.AddHeader( &block.Header{Nonce: 1, Round: 0, PubKeysBitmap: []byte("X")}, []byte("hash1"), process.BHProcessed, nil, nil, - false, ) assert.Equal(t, sync.ErrHigherNonceInBlock, err) } diff --git a/process/sync/metablock.go b/process/sync/metablock.go index 41c134a4594..fcd5a82ac2f 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -2,7 +2,6 @@ package sync import ( "fmt" - "math" "time" "github.com/ElrondNetwork/elrond-go/consensus" @@ -25,6 +24,7 @@ import ( // MetaBootstrap implements the bootstrap mechanism type MetaBootstrap struct { *baseBootstrap + epochBootstrap process.EpochBootstrapper } // NewMetaBootstrap creates a new Bootstrap object @@ -46,17 +46,18 @@ func NewMetaBootstrap( bootStorer process.BootStorer, storageBootstrapper process.BootstrapperFromStorage, requestedItemsHandler dataRetriever.RequestedItemsHandler, + epochBootstrap process.EpochBootstrapper, ) (*MetaBootstrap, error) { if check.IfNil(poolsHolder) { return nil, process.ErrNilPoolsHolder } - if check.IfNil(poolsHolder.HeadersNonces()) { - return nil, process.ErrNilHeadersNoncesDataPool - } - if check.IfNil(poolsHolder.MetaBlocks()) { + if check.IfNil(poolsHolder.Headers()) { return nil, process.ErrNilMetaBlocksPool } + if check.IfNil(epochBootstrap) { + return nil, process.ErrNilEpochStartTrigger + } err := checkBootstrapNilParameters( blkc, @@ -81,8 +82,7 @@ func NewMetaBootstrap( blkc: blkc, blkExecutor: blkExecutor, store: store, - headers: poolsHolder.MetaBlocks(), - 
headersNonces: poolsHolder.HeadersNonces(), + headers: poolsHolder.Headers(), rounder: rounder, waitTime: waitTime, hasher: hasher, @@ -99,7 +99,8 @@ func NewMetaBootstrap( } boot := MetaBootstrap{ - baseBootstrap: base, + baseBootstrap: base, + epochBootstrap: epochBootstrap, } base.blockBootstrapper = &boot @@ -149,8 +150,7 @@ func NewMetaBootstrap( boot.setRequestedHeaderHash(nil) boot.setRequestedMiniBlocks(nil) - boot.headersNonces.RegisterHandler(boot.receivedHeaderNonce) - boot.headers.RegisterHandler(boot.receivedHeader) + boot.headers.RegisterHandler(boot.processReceivedHeader) boot.miniBlocks.RegisterHandler(boot.receivedBodyHash) boot.chStopSync = make(chan bool) @@ -185,16 +185,6 @@ func (boot *MetaBootstrap) getBlockBody(headerHandler data.HeaderHandler) (data. return block.Body(miniBlocks), nil } -func (boot *MetaBootstrap) receivedHeader(headerHash []byte) { - header, err := process.GetMetaHeaderFromPool(headerHash, boot.headers) - if err != nil { - log.Trace("GetMetaHeaderFromPool", "error", err.Error()) - return - } - - boot.processReceivedHeader(header, headerHash) -} - // StartSync method will start SyncBlocks as a go routine func (boot *MetaBootstrap) StartSync() { // when a node starts it first tries to bootstrap from storage, if there already exist a database saved @@ -204,11 +194,34 @@ func (boot *MetaBootstrap) StartSync() { } else { _, numHdrs := updateMetricsFromStorage(boot.store, boot.uint64Converter, boot.marshalizer, boot.statusHandler, boot.storageBootstrapper.GetHighestBlockNonce()) boot.blkExecutor.SetNumProcessedObj(numHdrs) + + boot.setLastEpochStartRound() } go boot.syncBlocks() } +func (boot *MetaBootstrap) setLastEpochStartRound() { + hdr := boot.blkc.GetCurrentBlockHeader() + if check.IfNil(hdr) || hdr.GetEpoch() < 1 { + return + } + + epochIdentifier := core.EpochStartIdentifier(hdr.GetEpoch()) + epochStartHdr, err := boot.headerStore.Get([]byte(epochIdentifier)) + if err != nil { + return + } + + epochStartMetaBlock := &block.MetaBlock{} + err = boot.marshalizer.Unmarshal(epochStartMetaBlock, epochStartHdr) + if err != nil { + return + } + + boot.epochBootstrap.SetCurrentEpochStartRound(epochStartMetaBlock.GetRound()) +} + // SyncBlock method actually does the synchronization. It requests the next block header from the pool // and if it is not found there it will be requested from the network. After the header is received, // it requests the block body in the same way(pool and than, if it is not found in the pool, from network). 
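The epoch start wiring added here works as follows: NewMetaBootstrap now receives a process.EpochBootstrapper, headers are registered straight through boot.processReceivedHeader, and a node that bootstraps from storage calls setLastEpochStartRound to recover the round of the last epoch start metablock before syncing resumes. The snippet below is a minimal, self-contained sketch of that recovery flow; the interfaces and the key format returned by core.EpochStartIdentifier are simplified stand-ins for illustration, not the actual elrond-go definitions.

package sketch

import "fmt"

// Simplified stand-ins for the storage, marshalizing and epoch start trigger
// dependencies that setLastEpochStartRound uses in the patch.
type storer interface {
	Get(key []byte) ([]byte, error)
}

type marshalizer interface {
	Unmarshal(obj interface{}, buff []byte) error
}

type epochBootstrapper interface {
	SetCurrentEpochStartRound(round uint64)
}

type metaBlock struct {
	Round uint64
}

// epochStartIdentifier mimics core.EpochStartIdentifier: a storage key derived
// from the epoch number (the exact key format is assumed here).
func epochStartIdentifier(epoch uint32) string {
	return fmt.Sprintf("epochStartBlock_%d", epoch)
}

// setLastEpochStartRound mirrors the shape of the new method: look up the epoch
// start metablock by identifier, unmarshal it and forward its round to the
// epoch start trigger; every failure is silently skipped, as in the patch.
func setLastEpochStartRound(currentEpoch uint32, headerStore storer, m marshalizer, trigger epochBootstrapper) {
	if currentEpoch < 1 {
		return // still in the genesis epoch: nothing to restore
	}

	buff, err := headerStore.Get([]byte(epochStartIdentifier(currentEpoch)))
	if err != nil {
		return // epoch start metablock not found in the header store
	}

	epochStartMeta := &metaBlock{}
	if err = m.Unmarshal(epochStartMeta, buff); err != nil {
		return
	}

	trigger.SetCurrentEpochStartRound(epochStartMeta.Round)
}

In the real code the unmarshalled type is block.MetaBlock and the trigger is the process.EpochBootstrapper passed into NewMetaBootstrap; the sketch only shows the control flow.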
@@ -270,8 +283,7 @@ func (boot *MetaBootstrap) requestHeaderWithHash(hash []byte) { func (boot *MetaBootstrap) getHeaderWithNonceRequestingIfMissing(nonce uint64) (data.HeaderHandler, error) { hdr, _, err := process.GetMetaHeaderFromPoolWithNonce( nonce, - boot.headers, - boot.headersNonces) + boot.headers) if err != nil { _ = process.EmptyChannel(boot.chRcvHdrNonce) boot.requestHeaderWithNonce(nonce) @@ -282,8 +294,7 @@ func (boot *MetaBootstrap) getHeaderWithNonceRequestingIfMissing(nonce uint64) ( hdr, _, err = process.GetMetaHeaderFromPoolWithNonce( nonce, - boot.headers, - boot.headersNonces) + boot.headers) if err != nil { return nil, err } @@ -358,8 +369,7 @@ func (boot *MetaBootstrap) IsInterfaceNil() bool { func (boot *MetaBootstrap) haveHeaderInPoolWithNonce(nonce uint64) bool { _, _, err := process.GetMetaHeaderFromPoolWithNonce( nonce, - boot.headers, - boot.headersNonces) + boot.headers) return err == nil } @@ -391,20 +401,16 @@ func (boot *MetaBootstrap) getBlockBodyRequestingIfMissing(headerHandler data.He return blockBody, nil } -func (boot *MetaBootstrap) requestMiniBlocksFromHeaderWithNonceIfMissing(_ uint32, nonce uint64) { +func (boot *MetaBootstrap) requestMiniBlocksFromHeaderWithNonceIfMissing(headerHandler data.HeaderHandler) { nextBlockNonce := boot.getNonceForNextBlock() maxNonce := core.MinUint64(nextBlockNonce+process.MaxHeadersToRequestInAdvance-1, boot.forkDetector.ProbableHighestNonce()) - if nonce < nextBlockNonce || nonce > maxNonce { + if headerHandler.GetNonce() < nextBlockNonce || headerHandler.GetNonce() > maxNonce { return } - header, _, err := process.GetMetaHeaderFromPoolWithNonce( - nonce, - boot.headers, - boot.headersNonces) - - if err != nil { - log.Trace("GetMetaHeaderFromPoolWithNonce", "error", err.Error()) + header, ok := headerHandler.(*block.MetaBlock) + if !ok { + log.Warn("cannot convert headerHandler in block.MetaBlock") return } @@ -442,8 +448,5 @@ func (boot *MetaBootstrap) requestMiniBlocksFromHeaderWithNonceIfMissing(_ uint3 } func (boot *MetaBootstrap) isForkTriggeredByMeta() bool { - return boot.forkInfo.IsDetected && - boot.forkInfo.Nonce != math.MaxUint64 && - boot.forkInfo.Round != math.MaxUint64 && - boot.forkInfo.Hash != nil + return false } diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index 08598de1eba..5a029da0111 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -14,7 +14,6 @@ import ( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/blockchain" "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/mock" @@ -26,32 +25,24 @@ import ( func createMockMetaPools() *mock.MetaPoolsHolderStub { pools := &mock.MetaPoolsHolderStub{} - pools.MetaBlocksCalled = func() storage.Cacher { - sds := &mock.CacherStub{ - HasOrAddCalled: func(key []byte, value interface{}) (ok, evicted bool) { - return false, false + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{ + AddCalled: func(headerHash []byte, header data.HeaderHandler) { + }, - RegisterHandlerCalled: func(func(key []byte)) {}, - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false + RemoveHeaderByHashCalled: func(headerHash []byte) { + }, - RemoveCalled: func(key []byte) { - return + RegisterHandlerCalled: 
func(handler func(header data.HeaderHandler, shardHeaderHash []byte)) { + }, - } - return sds - } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - hnc := &mock.Uint64SyncMapCacherStub{ - GetCalled: func(u uint64) (dataRetriever.ShardIdHashMap, bool) { - return nil, false + GetHeaderByHashCalled: func(hash []byte) (handler data.HeaderHandler, e error) { + return nil, nil }, - RegisterHandlerCalled: func(handler func(nonce uint64, shardId uint32, hash []byte)) {}, - RemoveCalled: func(nonce uint64, shardId uint32) {}, - MergeCalled: func(u uint64, src dataRetriever.ShardIdHashMap) {}, } - return hnc + return sds } + pools.MiniBlocksCalled = func() storage.Cacher { sds := &mock.CacherStub{ HasOrAddCalled: func(key []byte, value interface{}) (ok, evicted bool) { @@ -157,6 +148,7 @@ func TestNewMetaBootstrap_NilPoolsHolderShouldErr(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) assert.Nil(t, bs) @@ -167,7 +159,7 @@ func TestNewMetaBootstrap_PoolsHolderRetNilOnHeadersShouldErr(t *testing.T) { t.Parallel() pools := createMockMetaPools() - pools.MetaBlocksCalled = func() storage.Cacher { + pools.HeadersCalled = func() dataRetriever.HeadersPool { return nil } @@ -198,6 +190,7 @@ func TestNewMetaBootstrap_PoolsHolderRetNilOnHeadersShouldErr(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) assert.Nil(t, bs) @@ -234,6 +227,7 @@ func TestNewMetaBootstrap_NilStoreShouldErr(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) assert.Nil(t, bs) @@ -270,6 +264,7 @@ func TestNewMetaBootstrap_NilBlockchainShouldErr(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) assert.Nil(t, bs) @@ -306,6 +301,7 @@ func TestNewMetaBootstrap_NilRounderShouldErr(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) assert.Nil(t, bs) @@ -342,6 +338,7 @@ func TestNewMetaBootstrap_NilBlockProcessorShouldErr(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) assert.Nil(t, bs) @@ -378,6 +375,7 @@ func TestNewMetaBootstrap_NilHasherShouldErr(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) assert.Nil(t, bs) @@ -414,6 +412,7 @@ func TestNewMetaBootstrap_NilMarshalizerShouldErr(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) assert.Nil(t, bs) @@ -450,6 +449,7 @@ func TestNewMetaBootstrap_NilForkDetectorShouldErr(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) assert.Nil(t, bs) @@ -487,6 +487,7 @@ func TestNewMetaBootstrap_NilResolversContainerShouldErr(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) assert.Nil(t, bs) @@ -523,6 +524,7 @@ func TestNewMetaBootstrap_NilShardCoordinatorShouldErr(t *testing.T) { &mock.BoostrapStorerMock{}, 
&mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) assert.Nil(t, bs) @@ -559,6 +561,7 @@ func TestNewMetaBootstrap_NilAccountsAdapterShouldErr(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) assert.Nil(t, bs) @@ -596,6 +599,7 @@ func TestNewMetaBootstrap_NilBlackListHandlerShouldErr(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) assert.Nil(t, bs) @@ -643,6 +647,7 @@ func TestNewMetaBootstrap_NilHeaderResolverShouldErr(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) assert.Nil(t, bs) @@ -690,6 +695,7 @@ func TestNewMetaBootstrap_NilTxBlockBodyResolverShouldErr(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) assert.Nil(t, bs) @@ -702,26 +708,16 @@ func TestNewMetaBootstrap_OkValsShouldWork(t *testing.T) { wasCalled := 0 pools := createMockMetaPools() - pools.MetaBlocksCalled = func() storage.Cacher { - sds := &mock.CacherStub{} - - sds.HasOrAddCalled = func(key []byte, value interface{}) (ok, evicted bool) { + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} + sds.AddCalled = func(headerHash []byte, header data.HeaderHandler) { assert.Fail(t, "should have not reached this point") - return false, false - } - - sds.RegisterHandlerCalled = func(func(key []byte)) { } - - return sds - } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - hnc := &mock.Uint64SyncMapCacherStub{} - hnc.RegisterHandlerCalled = func(handler func(nonce uint64, shardId uint32, hash []byte)) { + sds.RegisterHandlerCalled = func(func(header data.HeaderHandler, key []byte)) { wasCalled++ } - return hnc + return sds } blkc := initBlockchain() @@ -751,6 +747,7 @@ func TestNewMetaBootstrap_OkValsShouldWork(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) assert.NotNil(t, bs) @@ -786,7 +783,7 @@ func TestMetaBootstrap_SyncBlockShouldCallRollBack(t *testing.T) { Hash: []byte("hash"), } } - forkDetector.RemoveHeadersCalled = func(nonce uint64, hash []byte) { + forkDetector.RemoveHeaderCalled = func(nonce uint64, hash []byte) { } forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { return hdr.Nonce @@ -824,6 +821,7 @@ func TestMetaBootstrap_SyncBlockShouldCallRollBack(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) r := bs.SyncBlock() @@ -887,6 +885,7 @@ func TestMetaBootstrap_ShouldReturnTimeIsOutWhenMissingHeader(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) r := bs.SyncBlock() @@ -947,6 +946,7 @@ func TestMetaBootstrap_ShouldNotNeedToSync(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) bs.StartSync() @@ -970,46 +970,28 @@ func TestMetaBootstrap_SyncShouldSyncOneBlock(t *testing.T) { hash := []byte("aaa") pools := createMockMetaPools() - pools.MetaBlocksCalled = func() storage.Cacher { - sds := &mock.CacherStub{} + 
pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} - sds.PeekCalled = func(key []byte) (value interface{}, ok bool) { + sds.GetHeaderByHashCalled = func(hashS []byte) (handler data.HeaderHandler, e error) { mutDataAvailable.RLock() defer mutDataAvailable.RUnlock() - if bytes.Equal(hash, key) && dataAvailable { + if bytes.Equal(hash, hashS) && dataAvailable { return &block.MetaBlock{ Nonce: 2, Round: 1, - RootHash: []byte("bbb")}, true + RootHash: []byte("bbb")}, nil } - return nil, false + return nil, errors.New("err") } - - sds.RegisterHandlerCalled = func(func(key []byte)) { + sds.RegisterHandlerCalled = func(func(header data.HeaderHandler, key []byte)) { + time.Sleep(10 * time.Millisecond) } return sds } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - hnc := &mock.Uint64SyncMapCacherStub{} - hnc.RegisterHandlerCalled = func(handler func(nonce uint64, shardId uint32, hash []byte)) {} - hnc.GetCalled = func(u uint64) (dataRetriever.ShardIdHashMap, bool) { - mutDataAvailable.RLock() - defer mutDataAvailable.RUnlock() - - if u == 2 && dataAvailable { - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(sharding.MetachainShardId, hash) - - return syncMap, true - } - - return nil, false - } - return hnc - } hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} @@ -1054,6 +1036,7 @@ func TestMetaBootstrap_SyncShouldSyncOneBlock(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) bs.StartSync() @@ -1064,7 +1047,7 @@ func TestMetaBootstrap_SyncShouldSyncOneBlock(t *testing.T) { dataAvailable = true mutDataAvailable.Unlock() - time.Sleep(200 * time.Millisecond) + time.Sleep(500 * time.Millisecond) bs.StopSync() } @@ -1082,40 +1065,23 @@ func TestMetaBootstrap_ShouldReturnNilErr(t *testing.T) { hash := []byte("aaa") pools := createMockMetaPools() - pools.MetaBlocksCalled = func() storage.Cacher { - sds := &mock.CacherStub{} - - sds.PeekCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal(hash, key) { - return &block.MetaBlock{ + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} + sds.GetHeaderByNonceAndShardIdCalled = func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + if hdrNonce == 2 { + return []data.HeaderHandler{&block.MetaBlock{ Nonce: 2, Round: 1, - RootHash: []byte("bbb")}, true + RootHash: []byte("bbb")}}, [][]byte{hash}, nil } - return nil, false + return nil, nil, errors.New("err") } - - sds.RegisterHandlerCalled = func(func(key []byte)) { + sds.RegisterHandlerCalled = func(func(header data.HeaderHandler, key []byte)) { } return sds } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - hnc := &mock.Uint64SyncMapCacherStub{} - hnc.RegisterHandlerCalled = func(handler func(nonce uint64, shardId uint32, hash []byte)) {} - hnc.GetCalled = func(u uint64) (dataRetriever.ShardIdHashMap, bool) { - if u == 2 { - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(sharding.MetachainShardId, hash) - - return syncMap, true - } - - return nil, false - } - return hnc - } pools.MiniBlocksCalled = func() storage.Cacher { sds := &mock.CacherStub{ HasOrAddCalled: func(key []byte, value interface{}) (ok, evicted bool) { @@ -1174,6 +1140,7 @@ func TestMetaBootstrap_ShouldReturnNilErr(t *testing.T) { &mock.BoostrapStorerMock{}, 
&mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) r := bs.SyncBlock() @@ -1194,43 +1161,24 @@ func TestMetaBootstrap_SyncBlockShouldReturnErrorWhenProcessBlockFailed(t *testi hash := []byte("aaa") pools := createMockMetaPools() - pools.MetaBlocksCalled = func() storage.Cacher { - sds := &mock.CacherStub{} - - sds.PeekCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal(hash, key) { - return &block.MetaBlock{ + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} + sds.GetHeaderByNonceAndShardIdCalled = func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + if hdrNonce == 2 { + return []data.HeaderHandler{&block.MetaBlock{ Nonce: 2, Round: 1, - RootHash: []byte("bbb")}, true + RootHash: []byte("bbb")}}, [][]byte{hash}, nil } - return nil, false + return nil, nil, errors.New("err") } - sds.RegisterHandlerCalled = func(func(key []byte)) { - } - sds.RemoveCalled = func(key []byte) { + sds.RegisterHandlerCalled = func(func(header data.HeaderHandler, key []byte)) { } return sds } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - hnc := &mock.Uint64SyncMapCacherStub{} - hnc.RegisterHandlerCalled = func(handler func(nonce uint64, shardId uint32, hash []byte)) {} - hnc.GetCalled = func(u uint64) (dataRetriever.ShardIdHashMap, bool) { - if u == 2 { - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(sharding.MetachainShardId, hash) - - return syncMap, true - } - - return nil, false - } - hnc.RemoveCalled = func(nonce uint64, shardId uint32) {} - return hnc - } hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} @@ -1244,8 +1192,7 @@ func TestMetaBootstrap_SyncBlockShouldReturnErrorWhenProcessBlockFailed(t *testi forkDetector.ProbableHighestNonceCalled = func() uint64 { return 2 } - forkDetector.RemoveHeadersCalled = func(nonce uint64, hash []byte) {} - forkDetector.ResetProbableHighestNonceCalled = func() {} + forkDetector.RemoveHeaderCalled = func(nonce uint64, hash []byte) {} forkDetector.GetNotarizedHeaderHashCalled = func(nonce uint64) []byte { return nil } @@ -1284,6 +1231,7 @@ func TestMetaBootstrap_SyncBlockShouldReturnErrorWhenProcessBlockFailed(t *testi &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) err := bs.SyncBlock() @@ -1308,7 +1256,7 @@ func TestMetaBootstrap_ShouldSyncShouldReturnFalseWhenCurrentBlockIsNilAndRoundI shardCoordinator := mock.NewOneShardCoordinatorMock() account := &mock.AccountsStub{} - rnd, _ := round.NewRound(time.Now(), time.Now(), 100*time.Millisecond, &mock.SyncTimerMock{}) + rnd, _ := round.NewRound(time.Now(), time.Now(), 200*time.Millisecond, &mock.SyncTimerMock{}) bs, err := sync.NewMetaBootstrap( pools, @@ -1332,6 +1280,7 @@ func TestMetaBootstrap_ShouldSyncShouldReturnFalseWhenCurrentBlockIsNilAndRoundI &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) assert.Nil(t, err) @@ -1380,6 +1329,7 @@ func TestMetaBootstrap_ShouldReturnTrueWhenCurrentBlockIsNilAndRoundIndexIsGreat &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) assert.True(t, bs.ShouldSync()) @@ -1432,8 +1382,8 @@ func TestMetaBootstrap_ShouldReturnFalseWhenNodeIsSynced(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, 
&mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) - assert.False(t, bs.ShouldSync()) } @@ -1484,12 +1434,13 @@ func TestMetaBootstrap_ShouldReturnTrueWhenNodeIsNotSynced(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) assert.True(t, bs.ShouldSync()) } -func TestMetaBootstrap_ShouldSyncShouldReturnFalseWhenForkIsDetectedAndItReceivesTheSameWrongHeader(t *testing.T) { +func TestMetaBootstrap_ShouldSyncShouldReturnTrueWhenForkIsDetectedAndItReceivesTheSameWrongHeader(t *testing.T) { t.Parallel() hdr1 := block.MetaBlock{Nonce: 1, Round: 2, PubKeysBitmap: []byte("A")} @@ -1504,15 +1455,31 @@ func TestMetaBootstrap_ShouldSyncShouldReturnFalseWhenForkIsDetectedAndItReceive } pools := createMockMetaPools() - pools.MetaBlocksCalled = func() storage.Cacher { - return sync.GetCacherWithHeaders(&hdr1, &hdr2, hash1, hash2) + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} + sds.GetHeaderByHashCalled = func(key []byte) (handler data.HeaderHandler, e error) { + if bytes.Equal(key, hash1) { + return &hdr1, nil + } + if bytes.Equal(key, hash2) { + return &hdr2, nil + } + + return nil, errors.New("err") + } + return sds } hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} rounder := &mock.RounderMock{} rounder.RoundIndex = 2 - forkDetector, _ := sync.NewMetaForkDetector(rounder, &mock.BlackListHandlerStub{}, 0) + forkDetector, _ := sync.NewMetaForkDetector( + rounder, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) shardCoordinator := mock.NewOneShardCoordinatorMock() account := &mock.AccountsStub{} @@ -1538,24 +1505,25 @@ func TestMetaBootstrap_ShouldSyncShouldReturnFalseWhenForkIsDetectedAndItReceive &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) - _ = forkDetector.AddHeader(&hdr1, hash1, process.BHProcessed, nil, nil, false) - _ = forkDetector.AddHeader(&hdr2, hash2, process.BHReceived, nil, nil, false) + _ = forkDetector.AddHeader(&hdr1, hash1, process.BHProcessed, nil, nil) + _ = forkDetector.AddHeader(&hdr2, hash2, process.BHReceived, nil, nil) shouldSync := bs.ShouldSync() assert.True(t, shouldSync) assert.True(t, bs.IsForkDetected()) if shouldSync && bs.IsForkDetected() { - forkDetector.RemoveHeaders(hdr1.GetNonce(), hash1) - bs.ReceivedHeaders(hash1) - _ = forkDetector.AddHeader(&hdr1, hash1, process.BHProcessed, nil, nil, false) + forkDetector.RemoveHeader(hdr1.GetNonce(), hash1) + bs.ReceivedHeaders(&hdr1, hash1) + _ = forkDetector.AddHeader(&hdr1, hash1, process.BHProcessed, nil, nil) } shouldSync = bs.ShouldSync() - assert.False(t, shouldSync) - assert.False(t, bs.IsForkDetected()) + assert.True(t, shouldSync) + assert.True(t, bs.IsForkDetected()) } func TestMetaBootstrap_ShouldSyncShouldReturnFalseWhenForkIsDetectedAndItReceivesTheGoodHeader(t *testing.T) { @@ -1573,15 +1541,31 @@ func TestMetaBootstrap_ShouldSyncShouldReturnFalseWhenForkIsDetectedAndItReceive } pools := createMockMetaPools() - pools.MetaBlocksCalled = func() storage.Cacher { - return sync.GetCacherWithHeaders(&hdr1, &hdr2, hash1, hash2) + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} + sds.GetHeaderByHashCalled = func(key []byte) (handler data.HeaderHandler, e error) { + if bytes.Equal(key, hash1) { + return &hdr1, nil + } + if bytes.Equal(key, hash2) { + return &hdr2, nil + } + + return nil, 
errors.New("err") + } + return sds } hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} rounder := &mock.RounderMock{} rounder.RoundIndex = 2 - forkDetector, _ := sync.NewMetaForkDetector(rounder, &mock.BlackListHandlerStub{}, 0) + forkDetector, _ := sync.NewMetaForkDetector( + rounder, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) shardCoordinator := mock.NewOneShardCoordinatorMock() account := &mock.AccountsStub{} @@ -1607,21 +1591,24 @@ func TestMetaBootstrap_ShouldSyncShouldReturnFalseWhenForkIsDetectedAndItReceive &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) - _ = forkDetector.AddHeader(&hdr1, hash1, process.BHProcessed, nil, nil, false) - _ = forkDetector.AddHeader(&hdr2, hash2, process.BHReceived, nil, nil, false) + _ = forkDetector.AddHeader(&hdr1, hash1, process.BHProcessed, nil, nil) + _ = forkDetector.AddHeader(&hdr2, hash2, process.BHReceived, nil, nil) shouldSync := bs.ShouldSync() assert.True(t, shouldSync) assert.True(t, bs.IsForkDetected()) if shouldSync && bs.IsForkDetected() { - forkDetector.RemoveHeaders(hdr1.GetNonce(), hash1) - bs.ReceivedHeaders(hash2) - _ = forkDetector.AddHeader(&hdr2, hash2, process.BHProcessed, nil, nil, false) + forkDetector.RemoveHeader(hdr1.GetNonce(), hash1) + bs.ReceivedHeaders(&hdr2, hash2) + _ = forkDetector.AddHeader(&hdr2, hash2, process.BHProcessed, nil, nil) } + time.Sleep(500 * time.Millisecond) + shouldSync = bs.ShouldSync() assert.False(t, shouldSync) assert.False(t, bs.IsForkDetected()) @@ -1637,11 +1624,14 @@ func TestMetaBootstrap_GetHeaderFromPoolShouldReturnNil(t *testing.T) { forkDetector.CheckForkCalled = func() *process.ForkInfo { return process.NewForkInfo() } + forkDetector.ProbableHighestNonceCalled = func() uint64 { + return 0 + } shardCoordinator := mock.NewOneShardCoordinatorMock() account := &mock.AccountsStub{} - rnd, _ := round.NewRound(time.Now(), time.Now(), 100*time.Millisecond, &mock.SyncTimerMock{}) + rnd, _ := round.NewRound(time.Now(), time.Now(), 200*time.Millisecond, &mock.SyncTimerMock{}) bs, _ := sync.NewMetaBootstrap( pools, @@ -1661,9 +1651,12 @@ func TestMetaBootstrap_GetHeaderFromPoolShouldReturnNil(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) - hdr, _, _ := process.GetMetaHeaderFromPoolWithNonce(0, pools.MetaBlocks(), pools.HeadersNonces()) + hdr, _, _ := process.GetMetaHeaderFromPoolWithNonce(0, pools.HeadersCalled()) + + time.Sleep(500 * time.Millisecond) assert.NotNil(t, bs) assert.Nil(t, hdr) } @@ -1675,40 +1668,28 @@ func TestMetaBootstrap_GetHeaderFromPoolShouldReturnHeader(t *testing.T) { hash := []byte("aaa") pools := createMockMetaPools() - pools.MetaBlocksCalled = func() storage.Cacher { - sds := &mock.CacherStub{} - - sds.PeekCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal(hash, key) { - return hdr, true + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} + sds.GetHeaderByNonceAndShardIdCalled = func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + if hdrNonce == 0 { + return []data.HeaderHandler{hdr}, [][]byte{hash}, nil } - return nil, false + return nil, nil, errors.New("err") } - sds.RegisterHandlerCalled = func(func(key []byte)) { + sds.RegisterHandlerCalled = func(func(header data.HeaderHandler, key []byte)) { } return sds } - pools.HeadersNoncesCalled = 
func() dataRetriever.Uint64SyncMapCacher { - hnc := &mock.Uint64SyncMapCacherStub{} - hnc.RegisterHandlerCalled = func(handler func(nonce uint64, shardId uint32, hash []byte)) {} - hnc.GetCalled = func(u uint64) (dataRetriever.ShardIdHashMap, bool) { - if u == 0 { - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(sharding.MetachainShardId, hash) - - return syncMap, true - } - - return nil, false - } - return hnc - } hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} + forkDetector.ProbableHighestNonceCalled = func() uint64 { + return 0 + } + account := &mock.AccountsStub{} shardCoordinator := mock.NewOneShardCoordinatorMock() @@ -1733,9 +1714,10 @@ func TestMetaBootstrap_GetHeaderFromPoolShouldReturnHeader(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) - hdr2, _, _ := process.GetMetaHeaderFromPoolWithNonce(0, pools.MetaBlocks(), pools.HeadersNonces()) + hdr2, _, _ := process.GetMetaHeaderFromPoolWithNonce(0, pools.HeadersCalled()) assert.NotNil(t, bs) assert.True(t, hdr == hdr2) } @@ -1749,17 +1731,18 @@ func TestMetaBootstrap_ReceivedHeadersFoundInPoolShouldAddToForkDetector(t *test addedHdr := &block.MetaBlock{} pools := createMockMetaPools() - pools.MetaBlocksCalled = func() storage.Cacher { - sds := &mock.CacherStub{} - sds.RegisterHandlerCalled = func(func(key []byte)) { + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} + sds.RegisterHandlerCalled = func(func(header data.HeaderHandler, key []byte)) { } - sds.PeekCalled = func(key []byte) (value interface{}, ok bool) { + sds.GetHeaderByHashCalled = func(key []byte) (handler data.HeaderHandler, e error) { if bytes.Equal(key, addedHash) { - return addedHdr, true + return addedHdr, nil } - return nil, false + return nil, errors.New("err") } + return sds } @@ -1767,7 +1750,7 @@ func TestMetaBootstrap_ReceivedHeadersFoundInPoolShouldAddToForkDetector(t *test hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} - forkDetector.AddHeaderCalled = func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error { + forkDetector.AddHeaderCalled = func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { if state == process.BHProcessed { return errors.New("processed") } @@ -1783,8 +1766,13 @@ func TestMetaBootstrap_ReceivedHeadersFoundInPoolShouldAddToForkDetector(t *test wasAdded = true return nil } + forkDetector.ProbableHighestNonceCalled = func() uint64 { + return 0 + } + + shardCoordinator := mock.NewMultipleShardsCoordinatorMock() + shardCoordinator.CurrentShard = sharding.MetachainShardId - shardCoordinator := mock.NewOneShardCoordinatorMock() account := &mock.AccountsStub{} rnd, _ := round.NewRound(time.Now(), time.Now(), 100*time.Millisecond, &mock.SyncTimerMock{}) @@ -1807,10 +1795,12 @@ func TestMetaBootstrap_ReceivedHeadersFoundInPoolShouldAddToForkDetector(t *test &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) - bs.ReceivedHeaders(addedHash) + bs.ReceivedHeaders(addedHdr, addedHash) + time.Sleep(500 * time.Millisecond) assert.True(t, wasAdded) } @@ -1826,7 +1816,7 @@ func 
TestMetaBootstrap_ReceivedHeadersNotFoundInPoolShouldNotAddToForkDetector(t hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} - forkDetector.AddHeaderCalled = func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error { + forkDetector.AddHeaderCalled = func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { if state == process.BHProcessed { return errors.New("processed") } @@ -1884,10 +1874,11 @@ func TestMetaBootstrap_ReceivedHeadersNotFoundInPoolShouldNotAddToForkDetector(t &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) - bs.ReceivedHeaders(addedHash) - + bs.ReceivedHeaders(addedHdr, addedHash) + time.Sleep(500 * time.Millisecond) assert.False(t, wasAdded) } @@ -1924,6 +1915,7 @@ func TestMetaBootstrap_RollBackNilBlockchainHeaderShouldErr(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) err := bs.RollBack(false) @@ -1961,6 +1953,7 @@ func TestMetaBootstrap_RollBackNilParamHeaderShouldErr(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { @@ -1978,21 +1971,18 @@ func TestMetaBootstrap_RollBackIsNotEmptyShouldErr(t *testing.T) { newHdrNonce := uint64(6) remFlags := &removedFlags{} - shardId := sharding.MetachainShardId pools := createMockMetaPools() - pools.MetaBlocksCalled = func() storage.Cacher { - return createHeadersDataPool(newHdrHash, remFlags) - } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - return createHeadersNoncesDataPool( - newHdrNonce, - newHdrHash, - newHdrNonce, - remFlags, - shardId, - ) + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} + sds.RemoveHeaderByHashCalled = func(headerHash []byte) { + if bytes.Equal(headerHash, newHdrHash) { + remFlags.flagHdrRemovedFromHeaders = true + } + } + return sds } + blkc := initBlockchain() rnd := &mock.RounderMock{} blkExec := &mock.BlockProcessorMock{} @@ -2024,6 +2014,7 @@ func TestMetaBootstrap_RollBackIsNotEmptyShouldErr(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) blkc.GetCurrentBlockHeaderCalled = func() data.HeaderHandler { @@ -2062,21 +2053,16 @@ func TestMetaBootstrap_RollBackIsEmptyCallRollBackOneBlockOkValsShouldWork(t *te } pools := createMockMetaPools() - shardId := sharding.MetachainShardId //data pool headers - pools.MetaBlocksCalled = func() storage.Cacher { - return createHeadersDataPool(currentHdrHash, remFlags) - } - //data pool headers-nonces - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - return createHeadersNoncesDataPool( - currentHdrNonce, - currentHdrHash, - currentHdrNonce, - remFlags, - shardId, - ) + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} + sds.RemoveHeaderByHashCalled = func(headerHash []byte) { + if bytes.Equal(headerHash, currentHdrHash) { + remFlags.flagHdrRemovedFromHeaders = true + } + } + return sds } //a mock blockchain with special 
header and tx block bodies stubs (defined above) @@ -2164,6 +2150,7 @@ func TestMetaBootstrap_RollBackIsEmptyCallRollBackOneBlockOkValsShouldWork(t *te &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) bs.SetForkNonce(currentHdrNonce) @@ -2200,8 +2187,7 @@ func TestMetaBootstrap_RollBackIsEmptyCallRollBackOneBlockOkValsShouldWork(t *te err := bs.RollBack(true) assert.Nil(t, err) - assert.True(t, remFlags.flagHdrRemovedFromNonces) - assert.False(t, remFlags.flagHdrRemovedFromHeaders) + assert.True(t, remFlags.flagHdrRemovedFromHeaders) assert.True(t, remFlags.flagHdrRemovedFromStorage) assert.True(t, remFlags.flagHdrRemovedFromForkDetector) assert.Equal(t, blkc.GetCurrentBlockHeader(), prevHdr) @@ -2234,21 +2220,16 @@ func TestMetaBootstrap_RollBackIsEmptyCallRollBackOneBlockToGenesisShouldWork(t } pools := createMockMetaPools() - shardId := sharding.MetachainShardId //data pool headers - pools.MetaBlocksCalled = func() storage.Cacher { - return createHeadersDataPool(currentHdrHash, remFlags) - } - //data pool headers-nonces - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - return createHeadersNoncesDataPool( - currentHdrNonce, - currentHdrHash, - currentHdrNonce, - remFlags, - shardId, - ) + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} + sds.RemoveHeaderByHashCalled = func(headerHash []byte) { + if bytes.Equal(headerHash, currentHdrHash) { + remFlags.flagHdrRemovedFromHeaders = true + } + } + return sds } //a mock blockchain with special header and tx block bodies stubs (defined above) @@ -2337,6 +2318,7 @@ func TestMetaBootstrap_RollBackIsEmptyCallRollBackOneBlockToGenesisShouldWork(t &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) bs.SetForkNonce(currentHdrNonce) @@ -2364,8 +2346,7 @@ func TestMetaBootstrap_RollBackIsEmptyCallRollBackOneBlockToGenesisShouldWork(t err := bs.RollBack(true) assert.Nil(t, err) - assert.True(t, remFlags.flagHdrRemovedFromNonces) - assert.False(t, remFlags.flagHdrRemovedFromHeaders) + assert.True(t, remFlags.flagHdrRemovedFromHeaders) assert.True(t, remFlags.flagHdrRemovedFromStorage) assert.True(t, remFlags.flagHdrRemovedFromForkDetector) assert.Nil(t, blkc.GetCurrentBlockHeader()) @@ -2403,6 +2384,7 @@ func TestMetaBootstrap_AddSyncStateListenerShouldAppendAnotherListener(t *testin &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) f1 := func(bool) {} @@ -2448,6 +2430,7 @@ func TestMetaBootstrap_NotifySyncStateListenersShouldNotify(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) mutex.RLock() @@ -2493,23 +2476,8 @@ func TestMetaBootstrap_SetStatusHandlerNilHandlerShouldErr(t *testing.T) { t.Parallel() pools := createMockMetaPools() - pools.MetaBlocksCalled = func() storage.Cacher { - sds := &mock.CacherStub{} - - sds.HasOrAddCalled = func(key []byte, value interface{}) (ok, evicted bool) { - return false, false - } - - sds.RegisterHandlerCalled = func(func(key []byte)) { - } - - return sds - } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - hnc := &mock.Uint64SyncMapCacherStub{} - hnc.RegisterHandlerCalled = func(handler func(nonce uint64, shardId uint32, hash []byte)) {} - - return hnc + pools.HeadersCalled = func() 
dataRetriever.HeadersPool { + return &mock.HeadersCacherStub{} } blkc := initBlockchain() @@ -2539,6 +2507,7 @@ func TestMetaBootstrap_SetStatusHandlerNilHandlerShouldErr(t *testing.T) { &mock.BoostrapStorerMock{}, &mock.StorageBootstrapperMock{}, &mock.RequestedItemsHandlerStub{}, + &mock.EpochStartTriggerStub{}, ) err := bs.SetStatusHandler(nil) diff --git a/process/sync/shardForkDetector.go b/process/sync/shardForkDetector.go index 39744d6c7f2..ef81169f222 100644 --- a/process/sync/shardForkDetector.go +++ b/process/sync/shardForkDetector.go @@ -1,10 +1,14 @@ package sync import ( + "bytes" + "math" + "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" ) // shardForkDetector implements the shard fork detector mechanism @@ -16,6 +20,7 @@ type shardForkDetector struct { func NewShardForkDetector( rounder consensus.Rounder, blackListHandler process.BlackListHandler, + blockTracker process.BlockTracker, genesisTime int64, ) (*shardForkDetector, error) { @@ -25,22 +30,30 @@ func NewShardForkDetector( if check.IfNil(blackListHandler) { return nil, process.ErrNilBlackListHandler } + if check.IfNil(blockTracker) { + return nil, process.ErrNilBlockTracker + } bfd := &baseForkDetector{ rounder: rounder, blackListHandler: blackListHandler, genesisTime: genesisTime, + blockTracker: blockTracker, } bfd.headers = make(map[uint64][]*headerInfo) + bfd.fork.checkpoint = make([]*checkpointInfo, 0) checkpoint := &checkpointInfo{} bfd.setFinalCheckpoint(checkpoint) bfd.addCheckpoint(checkpoint) + bfd.fork.rollBackNonce = math.MaxUint64 sfd := shardForkDetector{ baseForkDetector: bfd, } + sfd.blockTracker.RegisterSelfNotarizedHeadersHandler(sfd.ReceivedSelfNotarizedHeaders) + return &sfd, nil } @@ -49,67 +62,126 @@ func (sfd *shardForkDetector) AddHeader( header data.HeaderHandler, headerHash []byte, state process.BlockHeaderState, - finalHeaders []data.HeaderHandler, - finalHeadersHashes [][]byte, - isNotarizedShardStuck bool, + selfNotarizedHeaders []data.HeaderHandler, + selfNotarizedHeadersHashes [][]byte, ) error { + return sfd.addHeader( + header, + headerHash, + state, + selfNotarizedHeaders, + selfNotarizedHeadersHashes, + sfd.doJobOnBHProcessed, + ) +} - if check.IfNil(header) { - return ErrNilHeader - } - if headerHash == nil { - return ErrNilHash +func (sfd *shardForkDetector) doJobOnBHProcessed( + header data.HeaderHandler, + headerHash []byte, + selfNotarizedHeaders []data.HeaderHandler, + selfNotarizedHeadersHashes [][]byte, +) { + _ = sfd.appendSelfNotarizedHeaders(selfNotarizedHeaders, selfNotarizedHeadersHashes, sharding.MetachainShardId) + sfd.computeFinalCheckpoint() + sfd.addCheckpoint(&checkpointInfo{nonce: header.GetNonce(), round: header.GetRound(), hash: headerHash}) + sfd.removePastOrInvalidRecords() +} + +// ReceivedSelfNotarizedHeaders is a registered call handler through which fork detector is notified when metachain +// notarized new headers from self shard +func (sfd *shardForkDetector) ReceivedSelfNotarizedHeaders( + shardID uint32, + selfNotarizedHeaders []data.HeaderHandler, + selfNotarizedHeadersHashes [][]byte, +) { + // accept only self notarized headers by meta + if shardID != sharding.MetachainShardId { + return } - err := sfd.checkBlockBasicValidity(header, headerHash, state) - if err != nil { - return err + appended := sfd.appendSelfNotarizedHeaders(selfNotarizedHeaders, 
selfNotarizedHeadersHashes, shardID) + if appended { + sfd.computeFinalCheckpoint() } +} - sfd.activateForcedForkIfNeeded(header, state) +func (sfd *shardForkDetector) appendSelfNotarizedHeaders( + selfNotarizedHeaders []data.HeaderHandler, + selfNotarizedHeadersHashes [][]byte, + shardID uint32, +) bool { - isHeaderReceivedTooLate := sfd.isHeaderReceivedTooLate(header, state, process.ShardBlockFinality) - if isHeaderReceivedTooLate { - state = process.BHReceivedTooLate - } + selfNotarizedHeaderAdded := false + finalNonce := sfd.finalCheckpoint().nonce + + for i := 0; i < len(selfNotarizedHeaders); i++ { + if selfNotarizedHeaders[i].GetNonce() <= finalNonce { + continue + } - if state == process.BHProcessed { - sfd.addFinalHeaders(finalHeaders, finalHeadersHashes) - sfd.addCheckpoint(&checkpointInfo{nonce: header.GetNonce(), round: header.GetRound()}) - sfd.removePastOrInvalidRecords() - sfd.setIsNotarizedShardStuck(isNotarizedShardStuck) + appended := sfd.append(&headerInfo{ + nonce: selfNotarizedHeaders[i].GetNonce(), + round: selfNotarizedHeaders[i].GetRound(), + hash: selfNotarizedHeadersHashes[i], + state: process.BHNotarized, + }) + if appended { + log.Debug("added self notarized header in fork detector", + "shard", shardID, + "round", selfNotarizedHeaders[i].GetRound(), + "nonce", selfNotarizedHeaders[i].GetNonce(), + "hash", selfNotarizedHeadersHashes[i]) + + selfNotarizedHeaderAdded = true + } } - sfd.append(&headerInfo{ - nonce: header.GetNonce(), - round: header.GetRound(), - hash: headerHash, - state: state, - }) + return selfNotarizedHeaderAdded +} + +func (sfd *shardForkDetector) computeFinalCheckpoint() { + finalCheckpoint := sfd.finalCheckpoint() - probableHighestNonce := sfd.computeProbableHighestNonce() - sfd.setLastBlockRound(uint64(sfd.rounder.Index())) - sfd.setProbableHighestNonce(probableHighestNonce) + sfd.mutHeaders.RLock() + for nonce, headersInfo := range sfd.headers { + if finalCheckpoint.nonce >= nonce { + continue + } - return nil + indexBHProcessed, indexBHNotarized := sfd.getProcessedAndNotarizedIndexes(headersInfo) + isProcessedBlockAlreadyNotarized := indexBHProcessed != -1 && indexBHNotarized != -1 + if !isProcessedBlockAlreadyNotarized { + continue + } + + sameHash := bytes.Equal(headersInfo[indexBHNotarized].hash, headersInfo[indexBHProcessed].hash) + if !sameHash { + continue + } + + finalCheckpoint = &checkpointInfo{ + nonce: nonce, + round: headersInfo[indexBHNotarized].round, + hash: headersInfo[indexBHNotarized].hash, + } + } + sfd.mutHeaders.RUnlock() + + sfd.setFinalCheckpoint(finalCheckpoint) } -func (sfd *shardForkDetector) addFinalHeaders(finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte) { - finalCheckpointWasSet := false - for i := 0; i < len(finalHeaders); i++ { - isFinalHeaderNonceNotLowerThanCurrent := finalHeaders[i].GetNonce() >= sfd.finalCheckpoint().nonce - if isFinalHeaderNonceNotLowerThanCurrent { - if !finalCheckpointWasSet { - sfd.setFinalCheckpoint(&checkpointInfo{nonce: finalHeaders[i].GetNonce(), round: finalHeaders[i].GetRound()}) - finalCheckpointWasSet = true - } - - sfd.append(&headerInfo{ - nonce: finalHeaders[i].GetNonce(), - round: finalHeaders[i].GetRound(), - hash: finalHeadersHashes[i], - state: process.BHNotarized, - }) +func (sfd *shardForkDetector) getProcessedAndNotarizedIndexes(headersInfo []*headerInfo) (int, int) { + indexBHProcessed := -1 + indexBHNotarized := -1 + + for index, headerInfo := range headersInfo { + switch headerInfo.state { + case process.BHProcessed: + indexBHProcessed = index + 
case process.BHNotarized: + indexBHNotarized = index } } + + return indexBHProcessed, indexBHNotarized } diff --git a/process/sync/shardForkDetector_test.go b/process/sync/shardForkDetector_test.go index bf882ab15e2..50fd56516bc 100644 --- a/process/sync/shardForkDetector_test.go +++ b/process/sync/shardForkDetector_test.go @@ -13,7 +13,12 @@ import ( func TestNewShardForkDetector_NilRounderShouldErr(t *testing.T) { t.Parallel() - sfd, err := sync.NewShardForkDetector(nil, &mock.BlackListHandlerStub{}, 0) + sfd, err := sync.NewShardForkDetector( + nil, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) assert.Nil(t, sfd) assert.Equal(t, process.ErrNilRounder, err) } @@ -21,15 +26,38 @@ func TestNewShardForkDetector_NilRounderShouldErr(t *testing.T) { func TestNewShardForkDetector_NilBlackListShouldErr(t *testing.T) { t.Parallel() - sfd, err := sync.NewShardForkDetector(&mock.RounderMock{}, nil, 0) + sfd, err := sync.NewShardForkDetector( + &mock.RounderMock{}, + nil, + &mock.BlockTrackerMock{}, + 0, + ) assert.Nil(t, sfd) assert.Equal(t, process.ErrNilBlackListHandler, err) } +func TestNewShardForkDetector_NilBlockTrackerShouldErr(t *testing.T) { + t.Parallel() + + sfd, err := sync.NewShardForkDetector( + &mock.RounderMock{}, + &mock.BlackListHandlerStub{}, + nil, + 0, + ) + assert.Nil(t, sfd) + assert.Equal(t, process.ErrNilBlockTracker, err) +} + func TestNewShardForkDetector_OkParamsShouldWork(t *testing.T) { t.Parallel() - sfd, err := sync.NewShardForkDetector(&mock.RounderMock{}, &mock.BlackListHandlerStub{}, 0) + sfd, err := sync.NewShardForkDetector( + &mock.RounderMock{}, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) assert.Nil(t, err) assert.NotNil(t, sfd) @@ -43,8 +71,13 @@ func TestShardForkDetector_AddHeaderNilHeaderShouldErr(t *testing.T) { t.Parallel() rounderMock := &mock.RounderMock{RoundIndex: 100} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) - err := bfd.AddHeader(nil, make([]byte, 0), process.BHProcessed, nil, nil, false) + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) + err := bfd.AddHeader(nil, make([]byte, 0), process.BHProcessed, nil, nil) assert.Equal(t, sync.ErrNilHeader, err) } @@ -52,35 +85,29 @@ func TestShardForkDetector_AddHeaderNilHashShouldErr(t *testing.T) { t.Parallel() rounderMock := &mock.RounderMock{RoundIndex: 100} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) - err := bfd.AddHeader(&block.Header{}, nil, process.BHProcessed, nil, nil, false) + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) + err := bfd.AddHeader(&block.Header{}, nil, process.BHProcessed, nil, nil) assert.Equal(t, sync.ErrNilHash, err) } -func TestShardForkDetector_AddHeaderUnsignedBlockShouldErr(t *testing.T) { - t.Parallel() - - rounderMock := &mock.RounderMock{RoundIndex: 1} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) - err := bfd.AddHeader( - &block.Header{Nonce: 1, Round: 1}, - make([]byte, 0), - process.BHProcessed, - nil, - nil, - false) - assert.Equal(t, sync.ErrBlockIsNotSigned, err) -} - func TestShardForkDetector_AddHeaderNotPresentShouldWork(t *testing.T) { t.Parallel() hdr := &block.Header{Nonce: 1, Round: 1, PubKeysBitmap: []byte("X")} hash := make([]byte, 0) rounderMock := &mock.RounderMock{RoundIndex: 1} - bfd, _ := sync.NewShardForkDetector(rounderMock, 
&mock.BlackListHandlerStub{}, 0) - - err := bfd.AddHeader(hdr, hash, process.BHProcessed, nil, nil, false) + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) + err := bfd.AddHeader(hdr, hash, process.BHProcessed, nil, nil) assert.Nil(t, err) hInfos := bfd.GetHeaders(1) @@ -96,10 +123,14 @@ func TestShardForkDetector_AddHeaderPresentShouldAppend(t *testing.T) { hdr2 := &block.Header{Nonce: 1, Round: 1, PubKeysBitmap: []byte("X")} hash2 := []byte("hash2") rounderMock := &mock.RounderMock{RoundIndex: 1} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) - - _ = bfd.AddHeader(hdr1, hash1, process.BHProcessed, nil, nil, false) - err := bfd.AddHeader(hdr2, hash2, process.BHProcessed, nil, nil, false) + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) + _ = bfd.AddHeader(hdr1, hash1, process.BHProcessed, nil, nil) + err := bfd.AddHeader(hdr2, hash2, process.BHProcessed, nil, nil) assert.Nil(t, err) hInfos := bfd.GetHeaders(1) @@ -114,8 +145,13 @@ func TestShardForkDetector_AddHeaderWithProcessedBlockShouldSetCheckpoint(t *tes hdr1 := &block.Header{Nonce: 69, Round: 72, PubKeysBitmap: []byte("X")} hash1 := []byte("hash1") rounderMock := &mock.RounderMock{RoundIndex: 73} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) - _ = bfd.AddHeader(hdr1, hash1, process.BHProcessed, nil, nil, false) + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) + _ = bfd.AddHeader(hdr1, hash1, process.BHProcessed, nil, nil) assert.Equal(t, hdr1.Nonce, bfd.LastCheckpointNonce()) } @@ -126,10 +162,14 @@ func TestShardForkDetector_AddHeaderPresentShouldNotRewriteState(t *testing.T) { hash := []byte("hash1") hdr2 := &block.Header{Nonce: 1, Round: 1, PubKeysBitmap: []byte("X")} rounderMock := &mock.RounderMock{RoundIndex: 1} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) - - _ = bfd.AddHeader(hdr1, hash, process.BHReceived, nil, nil, false) - err := bfd.AddHeader(hdr2, hash, process.BHProcessed, nil, nil, false) + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) + _ = bfd.AddHeader(hdr1, hash, process.BHReceived, nil, nil) + err := bfd.AddHeader(hdr2, hash, process.BHProcessed, nil, nil) assert.Nil(t, err) hInfos := bfd.GetHeaders(1) @@ -143,14 +183,13 @@ func TestShardForkDetector_AddHeaderHigherNonceThanRoundShouldErr(t *testing.T) t.Parallel() rounderMock := &mock.RounderMock{RoundIndex: 100} - bfd, _ := sync.NewShardForkDetector(rounderMock, &mock.BlackListHandlerStub{}, 0) - err := bfd.AddHeader( - &block.Header{Nonce: 1, Round: 0, PubKeysBitmap: []byte("X")}, - []byte("hash1"), - process.BHProcessed, - nil, - nil, - false, + bfd, _ := sync.NewShardForkDetector( + rounderMock, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, ) + err := bfd.AddHeader( + &block.Header{Nonce: 1, Round: 0, PubKeysBitmap: []byte("X")}, []byte("hash1"), process.BHProcessed, nil, nil) assert.Equal(t, sync.ErrHigherNonceInBlock, err) } diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index 3c8a2713011..1cb4455c15d 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -54,9 +54,6 @@ func NewShardBootstrap( if check.IfNil(poolsHolder.Headers()) { return nil, process.ErrNilHeadersDataPool } - if 
check.IfNil(poolsHolder.HeadersNonces()) { - return nil, process.ErrNilHeadersNoncesDataPool - } if check.IfNil(poolsHolder.MiniBlocks()) { return nil, process.ErrNilTxBlockBody } @@ -85,7 +82,6 @@ func NewShardBootstrap( blkExecutor: blkExecutor, store: store, headers: poolsHolder.Headers(), - headersNonces: poolsHolder.HeadersNonces(), rounder: rounder, waitTime: waitTime, hasher: hasher, @@ -114,7 +110,8 @@ func NewShardBootstrap( //should be then removed from ShardBootstrap //there is one header topic so it is ok to save it - hdrResolver, err := resolversFinder.IntraShardResolver(factory.HeadersTopic) + //TODO make use of requestHandler + hdrResolver, err := resolversFinder.CrossShardResolver(factory.ShardBlocksTopic, sharding.MetachainShardId) if err != nil { return nil, err } @@ -153,9 +150,8 @@ func NewShardBootstrap( boot.setRequestedHeaderHash(nil) boot.setRequestedMiniBlocks(nil) - boot.headersNonces.RegisterHandler(boot.receivedHeaderNonce) boot.miniBlocks.RegisterHandler(boot.receivedBodyHash) - boot.headers.RegisterHandler(boot.receivedHeaders) + boot.headers.RegisterHandler(boot.processReceivedHeader) boot.chStopSync = make(chan bool) @@ -189,16 +185,6 @@ func (boot *ShardBootstrap) getBlockBody(headerHandler data.HeaderHandler) (data return block.Body(miniBlocks), nil } -func (boot *ShardBootstrap) receivedHeaders(headerHash []byte) { - header, err := process.GetShardHeaderFromPool(headerHash, boot.headers) - if err != nil { - log.Trace("GetShardHeaderFromPool", "error", err.Error()) - return - } - - boot.processReceivedHeader(header, headerHash) -} - // StartSync method will start SyncBlocks as a go routine func (boot *ShardBootstrap) StartSync() { errNotCritical := boot.storageBootstrapper.LoadFromStorage() @@ -276,12 +262,11 @@ func (boot *ShardBootstrap) getHeaderWithNonceRequestingIfMissing(nonce uint64) hdr, _, err := process.GetShardHeaderFromPoolWithNonce( nonce, boot.shardCoordinator.SelfId(), - boot.headers, - boot.headersNonces) + boot.headers) if err != nil { _ = process.EmptyChannel(boot.chRcvHdrNonce) boot.requestHeaderWithNonce(nonce) - err := boot.waitForHeaderNonce() + err = boot.waitForHeaderNonce() if err != nil { return nil, err } @@ -289,8 +274,7 @@ func (boot *ShardBootstrap) getHeaderWithNonceRequestingIfMissing(nonce uint64) hdr, _, err = process.GetShardHeaderFromPoolWithNonce( nonce, boot.shardCoordinator.SelfId(), - boot.headers, - boot.headersNonces) + boot.headers) if err != nil { return nil, err } @@ -306,7 +290,7 @@ func (boot *ShardBootstrap) getHeaderWithHashRequestingIfMissing(hash []byte) (d if err != nil { _ = process.EmptyChannel(boot.chRcvHdrHash) boot.requestHeaderWithHash(hash) - err := boot.waitForHeaderHash() + err = boot.waitForHeaderHash() if err != nil { return nil, err } @@ -366,8 +350,7 @@ func (boot *ShardBootstrap) haveHeaderInPoolWithNonce(nonce uint64) bool { _, _, err := process.GetShardHeaderFromPoolWithNonce( nonce, boot.shardCoordinator.SelfId(), - boot.headers, - boot.headersNonces) + boot.headers) return err == nil } @@ -376,21 +359,16 @@ func (boot *ShardBootstrap) getShardHeaderFromPool(headerHash []byte) (data.Head return process.GetShardHeaderFromPool(headerHash, boot.headers) } -func (boot *ShardBootstrap) requestMiniBlocksFromHeaderWithNonceIfMissing(shardId uint32, nonce uint64) { +func (boot *ShardBootstrap) requestMiniBlocksFromHeaderWithNonceIfMissing(headerHandler data.HeaderHandler) { nextBlockNonce := boot.getNonceForNextBlock() maxNonce := 
core.MinUint64(nextBlockNonce+process.MaxHeadersToRequestInAdvance-1, boot.forkDetector.ProbableHighestNonce()) - if nonce < nextBlockNonce || nonce > maxNonce { + if headerHandler.GetNonce() < nextBlockNonce || headerHandler.GetNonce() > maxNonce { return } - header, _, err := process.GetShardHeaderFromPoolWithNonce( - nonce, - shardId, - boot.headers, - boot.headersNonces) - - if err != nil { - log.Trace("GetShardHeaderFromPoolWithNonce", "error", err.Error()) + header, ok := headerHandler.(*block.Header) + if !ok { + log.Warn("cannot convert headerHandler in block.Header") return } diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index 6b0c4eba288..42b2fa40d64 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -15,7 +15,6 @@ import ( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/blockchain" "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/mock" @@ -39,17 +38,6 @@ type removedFlags struct { func createMockResolversFinder() *mock.ResolversFinderStub { return &mock.ResolversFinderStub{ IntraShardResolverCalled: func(baseTopic string) (resolver dataRetriever.Resolver, e error) { - if strings.Contains(baseTopic, factory.HeadersTopic) { - return &mock.HeaderResolverMock{ - RequestDataFromNonceCalled: func(nonce uint64) error { - return nil - }, - RequestDataFromHashCalled: func(hash []byte) error { - return nil - }, - }, nil - } - if strings.Contains(baseTopic, factory.MiniBlocksTopic) { return &mock.MiniBlocksResolverMock{ GetMiniBlocksCalled: func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { @@ -63,13 +51,8 @@ func createMockResolversFinder() *mock.ResolversFinderStub { return nil, nil }, - } -} - -func createMockResolversFinderNilMiniBlocks() *mock.ResolversFinderStub { - return &mock.ResolversFinderStub{ - IntraShardResolverCalled: func(baseTopic string) (resolver dataRetriever.Resolver, e error) { - if strings.Contains(baseTopic, factory.HeadersTopic) { + CrossShardResolverCalled: func(baseTopic string, crossShard uint32) (resolver dataRetriever.Resolver, err error) { + if strings.Contains(baseTopic, factory.ShardBlocksTopic) { return &mock.HeaderResolverMock{ RequestDataFromNonceCalled: func(nonce uint64) error { return nil @@ -80,6 +63,14 @@ func createMockResolversFinderNilMiniBlocks() *mock.ResolversFinderStub { }, nil } + return nil, nil + }, + } +} + +func createMockResolversFinderNilMiniBlocks() *mock.ResolversFinderStub { + return &mock.ResolversFinderStub{ + IntraShardResolverCalled: func(baseTopic string) (resolver dataRetriever.Resolver, e error) { if strings.Contains(baseTopic, factory.MiniBlocksTopic) { return &mock.MiniBlocksResolverMock{ RequestDataFromHashCalled: func(hash []byte) error { @@ -97,6 +88,20 @@ func createMockResolversFinderNilMiniBlocks() *mock.ResolversFinderStub { }, nil } + return nil, nil + }, + CrossShardResolverCalled: func(baseTopic string, crossShard uint32) (resolver dataRetriever.Resolver, err error) { + if strings.Contains(baseTopic, factory.ShardBlocksTopic) { + return &mock.HeaderResolverMock{ + RequestDataFromNonceCalled: func(nonce uint64) error { + return nil + }, + RequestDataFromHashCalled: func(hash []byte) error { + return nil + }, + }, nil + } + return nil, nil }, } @@ -104,29 +109,8 @@ func 
createMockResolversFinderNilMiniBlocks() *mock.ResolversFinderStub { func createMockPools() *mock.PoolsHolderStub { pools := &mock.PoolsHolderStub{} - pools.HeadersCalled = func() storage.Cacher { - sds := &mock.CacherStub{ - HasOrAddCalled: func(key []byte, value interface{}) (ok, evicted bool) { - return false, false - }, - RegisterHandlerCalled: func(func(key []byte)) {}, - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return nil, false - }, - RemoveCalled: func(key []byte) { - return - }, - } - return sds - } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - hnc := &mock.Uint64SyncMapCacherStub{ - GetCalled: func(u uint64) (dataRetriever.ShardIdHashMap, bool) { - return nil, false - }, - RegisterHandlerCalled: func(handler func(nonce uint64, shardId uint32, hash []byte)) {}, - } - return hnc + pools.HeadersCalled = func() dataRetriever.HeadersPool { + return &mock.HeadersCacherStub{} } pools.MiniBlocksCalled = func() storage.Cacher { cs := &mock.CacherStub{ @@ -162,10 +146,9 @@ func generateTestCache() storage.Cacher { } func generateTestUnit() storage.Storer { - memDB, _ := memorydb.New() storer, _ := storageUnit.NewStorageUnit( generateTestCache(), - memDB, + memorydb.New(), ) return storer } @@ -199,53 +182,9 @@ func createBlockProcessor() *mock.BlockProcessorMock { return blockProcessorMock } -func createHeadersDataPool(removedHashCompare []byte, remFlags *removedFlags) storage.Cacher { - sds := &mock.CacherStub{ - HasOrAddCalled: func(key []byte, value interface{}) (ok, evicted bool) { - return false, false - }, - RegisterHandlerCalled: func(func(key []byte)) {}, - RemoveCalled: func(key []byte) { - if bytes.Equal(key, removedHashCompare) { - remFlags.flagHdrRemovedFromHeaders = true - } - }, - } - return sds -} - -func createHeadersNoncesDataPool( - getNonceCompare uint64, - getRetHash []byte, - removedNonce uint64, - remFlags *removedFlags, - shardId uint32, -) dataRetriever.Uint64SyncMapCacher { - - hnc := &mock.Uint64SyncMapCacherStub{ - RegisterHandlerCalled: func(handler func(nonce uint64, shardId uint32, hash []byte)) {}, - GetCalled: func(u uint64) (dataRetriever.ShardIdHashMap, bool) { - if u == getNonceCompare { - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(shardId, getRetHash) - - return syncMap, true - } - - return nil, false - }, - RemoveCalled: func(nonce uint64, providedShardId uint32) { - if nonce == removedNonce && shardId == providedShardId { - remFlags.flagHdrRemovedFromNonces = true - } - }, - } - return hnc -} - func createForkDetector(removedNonce uint64, remFlags *removedFlags) process.ForkDetector { return &mock.ForkDetectorMock{ - RemoveHeadersCalled: func(nonce uint64, hash []byte) { + RemoveHeaderCalled: func(nonce uint64, hash []byte) { if nonce == removedNonce { remFlags.flagHdrRemovedFromForkDetector = true } @@ -320,7 +259,7 @@ func TestNewShardBootstrap_PoolsHolderRetNilOnHeadersShouldErr(t *testing.T) { t.Parallel() pools := createMockPools() - pools.HeadersCalled = func() storage.Cacher { + pools.HeadersCalled = func() dataRetriever.HeadersPool { return nil } @@ -357,46 +296,6 @@ func TestNewShardBootstrap_PoolsHolderRetNilOnHeadersShouldErr(t *testing.T) { assert.Equal(t, process.ErrNilHeadersDataPool, err) } -func TestNewShardBootstrap_PoolsHolderRetNilOnHeadersNoncesShouldErr(t *testing.T) { - t.Parallel() - - pools := createMockPools() - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - return nil - } - blkc := initBlockchain() - rnd := &mock.RounderMock{} - blkExec 
:= &mock.BlockProcessorMock{} - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - forkDetector := &mock.ForkDetectorMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - account := &mock.AccountsStub{} - - bs, err := sync.NewShardBootstrap( - pools, - createStore(), - blkc, - rnd, - blkExec, - waitTime, - hasher, - marshalizer, - forkDetector, - &mock.ResolversFinderStub{}, - shardCoordinator, - account, - &mock.BlackListHandlerStub{}, - &mock.NetworkConnectionWatcherStub{}, - &mock.BoostrapStorerMock{}, - &mock.StorageBootstrapperMock{}, - &mock.RequestedItemsHandlerStub{}, - ) - - assert.Nil(t, bs) - assert.Equal(t, process.ErrNilHeadersNoncesDataPool, err) -} - func TestNewShardBootstrap_PoolsHolderRetNilOnTxBlockBodyShouldErr(t *testing.T) { t.Parallel() @@ -842,14 +741,17 @@ func TestNewShardBootstrap_NilHeaderResolverShouldErr(t *testing.T) { pools := createMockPools() resFinder := &mock.ResolversFinderStub{ IntraShardResolverCalled: func(baseTopic string) (resolver dataRetriever.Resolver, e error) { - if strings.Contains(baseTopic, factory.HeadersTopic) { - return nil, errExpected - } - if strings.Contains(baseTopic, factory.MiniBlocksTopic) { return &mock.ResolverStub{}, nil } + return nil, nil + }, + CrossShardResolverCalled: func(baseTopic string, crossShard uint32) (resolver dataRetriever.Resolver, err error) { + if strings.Contains(baseTopic, factory.ShardBlocksTopic) { + return nil, errExpected + } + return nil, nil }, } @@ -894,14 +796,17 @@ func TestNewShardBootstrap_NilTxBlockBodyResolverShouldErr(t *testing.T) { pools := createMockPools() resFinder := &mock.ResolversFinderStub{ IntraShardResolverCalled: func(baseTopic string) (resolver dataRetriever.Resolver, e error) { - if strings.Contains(baseTopic, factory.HeadersTopic) { - return &mock.HeaderResolverMock{}, errExpected - } - if strings.Contains(baseTopic, factory.MiniBlocksTopic) { return nil, errExpected } + return nil, nil + }, + CrossShardResolverCalled: func(baseTopic string, crossShard uint32) (resolver dataRetriever.Resolver, err error) { + if strings.Contains(baseTopic, factory.ShardBlocksTopic) { + return &mock.HeaderResolverMock{}, errExpected + } + return nil, nil }, } @@ -945,26 +850,16 @@ func TestNewShardBootstrap_OkValsShouldWork(t *testing.T) { wasCalled := 0 pools := &mock.PoolsHolderStub{} - pools.HeadersCalled = func() storage.Cacher { - sds := &mock.CacherStub{} + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} - sds.HasOrAddCalled = func(key []byte, value interface{}) (ok, evicted bool) { + sds.AddCalled = func(headerHash []byte, header data.HeaderHandler) { assert.Fail(t, "should have not reached this point") - return false, false } - - sds.RegisterHandlerCalled = func(func(key []byte)) { - } - - return sds - } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - hnc := &mock.Uint64SyncMapCacherStub{} - hnc.RegisterHandlerCalled = func(handler func(nonce uint64, shardId uint32, hash []byte)) { + sds.RegisterHandlerCalled = func(func(header data.HeaderHandler, key []byte)) { wasCalled++ } - - return hnc + return sds } pools.MiniBlocksCalled = func() storage.Cacher { cs := &mock.CacherStub{} @@ -1048,7 +943,7 @@ func TestBootstrap_SyncBlockShouldCallForkChoice(t *testing.T) { Hash: []byte("hash"), } } - forkDetector.RemoveHeadersCalled = func(nonce uint64, hash []byte) { + forkDetector.RemoveHeaderCalled = func(nonce uint64, hash []byte) { } forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 
{ return hdr.Nonce @@ -1166,45 +1061,20 @@ func TestBootstrap_ShouldReturnTimeIsOutWhenMissingBody(t *testing.T) { return &hdr } - shardId := uint32(0) hash := []byte("aaa") pools := createMockPools() - pools.HeadersCalled = func() storage.Cacher { - sds := &mock.CacherStub{} - - sds.PeekCalled = func(key []byte) (value interface{}, ok bool) { + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} + sds.GetHeaderByHashCalled = func(key []byte) (handler data.HeaderHandler, e error) { if bytes.Equal(hash, key) { - return &block.Header{Nonce: 2}, true + return &block.Header{Nonce: 2}, nil } - return nil, false + return nil, errors.New("err") } - - sds.RegisterHandlerCalled = func(func(key []byte)) { - } - - sds.RemoveCalled = func(key []byte) { - } - return sds } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - hnc := &mock.Uint64SyncMapCacherStub{} - hnc.RegisterHandlerCalled = func(handler func(nonce uint64, shardId uint32, hash []byte)) {} - hnc.GetCalled = func(u uint64) (dataRetriever.ShardIdHashMap, bool) { - if u == 2 { - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(shardId, hash) - - return syncMap, true - } - - return nil, false - } - - return hnc - } hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} @@ -1332,17 +1202,15 @@ func TestBootstrap_SyncShouldSyncOneBlock(t *testing.T) { return &hdr } - shardId := uint32(0) hash := []byte("aaa") mutDataAvailable := goSync.RWMutex{} dataAvailable := false pools := &mock.PoolsHolderStub{} - pools.HeadersCalled = func() storage.Cacher { - sds := &mock.CacherStub{} - - sds.PeekCalled = func(key []byte) (value interface{}, ok bool) { + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} + sds.GetHeaderByHashCalled = func(key []byte) (handler data.HeaderHandler, e error) { mutDataAvailable.RLock() defer mutDataAvailable.RUnlock() @@ -1351,35 +1219,17 @@ func TestBootstrap_SyncShouldSyncOneBlock(t *testing.T) { Nonce: 2, Round: 1, BlockBodyType: block.TxBlock, - RootHash: []byte("bbb")}, true + RootHash: []byte("bbb")}, nil } - return nil, false + return nil, errors.New("err") } - sds.RegisterHandlerCalled = func(func(key []byte)) { + sds.RegisterHandlerCalled = func(func(header data.HeaderHandler, key []byte)) { } return sds } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - hnc := &mock.Uint64SyncMapCacherStub{} - hnc.RegisterHandlerCalled = func(handler func(nonce uint64, shardId uint32, hash []byte)) {} - hnc.GetCalled = func(u uint64) (dataRetriever.ShardIdHashMap, bool) { - mutDataAvailable.RLock() - defer mutDataAvailable.RUnlock() - - if u == 2 && dataAvailable { - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(shardId, hash) - - return syncMap, true - } - - return nil, false - } - return hnc - } pools.MiniBlocksCalled = func() storage.Cacher { cs := &mock.CacherStub{} cs.RegisterHandlerCalled = func(i func(key []byte)) { @@ -1417,7 +1267,7 @@ func TestBootstrap_SyncShouldSyncOneBlock(t *testing.T) { return nil, nil } - rnd, _ := round.NewRound(time.Now(), time.Now().Add(200*time.Millisecond), time.Duration(100*time.Millisecond), &mock.SyncTimerMock{}) + rnd, _ := round.NewRound(time.Now(), time.Now().Add(200*time.Millisecond), 100*time.Millisecond, &mock.SyncTimerMock{}) bs, _ := sync.NewShardBootstrap( pools, @@ -1451,7 +1301,7 @@ func TestBootstrap_SyncShouldSyncOneBlock(t *testing.T) { dataAvailable = true 
mutDataAvailable.Unlock() - time.Sleep(200 * time.Millisecond) + time.Sleep(500 * time.Millisecond) bs.StopSync() } @@ -1467,45 +1317,26 @@ func TestBootstrap_ShouldReturnNilErr(t *testing.T) { return &hdr } - shardId := uint32(0) hash := []byte("aaa") + header := &block.Header{ + Nonce: 2, + Round: 1, + BlockBodyType: block.TxBlock, + RootHash: []byte("bbb")} pools := &mock.PoolsHolderStub{} - pools.HeadersCalled = func() storage.Cacher { - sds := &mock.CacherStub{} - - sds.PeekCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal(hash, key) { - return &block.Header{ - Nonce: 2, - Round: 1, - BlockBodyType: block.TxBlock, - RootHash: []byte("bbb")}, true + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} + sds.GetHeaderByNonceAndShardIdCalled = func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + if hdrNonce == 2 { + return []data.HeaderHandler{header}, [][]byte{hash}, nil } - return nil, false - } - - sds.RegisterHandlerCalled = func(func(key []byte)) { + return nil, nil, errors.New("err") } return sds } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - hnc := &mock.Uint64SyncMapCacherStub{} - hnc.RegisterHandlerCalled = func(handler func(nonce uint64, shardId uint32, hash []byte)) {} - hnc.GetCalled = func(u uint64) (dataRetriever.ShardIdHashMap, bool) { - if u == 2 { - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(shardId, hash) - - return syncMap, true - } - - return nil, false - } - return hnc - } pools.MiniBlocksCalled = func() storage.Cacher { cs := &mock.CacherStub{} cs.RegisterHandlerCalled = func(i func(key []byte)) { @@ -1583,45 +1414,25 @@ func TestBootstrap_SyncBlockShouldReturnErrorWhenProcessBlockFailed(t *testing.T return &hdr } - shardId := uint32(0) hash := []byte("aaa") + header := &block.Header{ + Nonce: 2, + Round: 1, + BlockBodyType: block.TxBlock, + RootHash: []byte("bbb")} pools := &mock.PoolsHolderStub{} - pools.HeadersCalled = func() storage.Cacher { - sds := &mock.CacherStub{} - - sds.PeekCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal(hash, key) { - return &block.Header{ - Nonce: 2, - Round: 1, - BlockBodyType: block.TxBlock, - RootHash: []byte("bbb")}, true + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} + sds.GetHeaderByNonceAndShardIdCalled = func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + if hdrNonce == 2 { + return []data.HeaderHandler{header}, [][]byte{hash}, nil } - - return nil, false + return nil, nil, errors.New("err") } - sds.RegisterHandlerCalled = func(func(key []byte)) {} - sds.RemoveCalled = func(key []byte) {} return sds } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - hnc := &mock.Uint64SyncMapCacherStub{} - hnc.RegisterHandlerCalled = func(handler func(nonce uint64, shardId uint32, hash []byte)) {} - hnc.GetCalled = func(u uint64) (dataRetriever.ShardIdHashMap, bool) { - if u == 2 { - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(shardId, hash) - - return syncMap, true - } - - return nil, false - } - hnc.RemoveCalled = func(nonce uint64, shardId uint32) {} - return hnc - } pools.MiniBlocksCalled = func() storage.Cacher { cs := &mock.CacherStub{} cs.RegisterHandlerCalled = func(i func(key []byte)) { @@ -1649,8 +1460,7 @@ func TestBootstrap_SyncBlockShouldReturnErrorWhenProcessBlockFailed(t *testing.T forkDetector.ProbableHighestNonceCalled = func() 
uint64 { return 2 } - forkDetector.RemoveHeadersCalled = func(nonce uint64, hash []byte) {} - forkDetector.ResetProbableHighestNonceCalled = func() {} + forkDetector.RemoveHeaderCalled = func(nonce uint64, hash []byte) {} forkDetector.GetNotarizedHeaderHashCalled = func(nonce uint64) []byte { return nil } @@ -1908,23 +1718,41 @@ func TestBootstrap_ShouldSyncShouldReturnTrueWhenForkIsDetectedAndItReceivesTheS return &hdr1 } - finalHeaders := []data.HeaderHandler{ + selfNotarizedHeaders := []data.HeaderHandler{ &hdr2, } - finalHeadersHashes := [][]byte{ + selfNotarizedHeadersHashes := [][]byte{ hash2, } pools := createMockPools() - pools.HeadersCalled = func() storage.Cacher { - return sync.GetCacherWithHeaders(&hdr1, &hdr2, hash1, hash2) + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{ + RegisterHandlerCalled: func(func(header data.HeaderHandler, key []byte)) {}, + GetHeaderByHashCalled: func(key []byte) (handler data.HeaderHandler, e error) { + if bytes.Equal(key, hash1) { + return &hdr1, nil + } + if bytes.Equal(key, hash2) { + return &hdr2, nil + } + + return nil, errors.New("err") + }, + } + return sds } hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} rounder := &mock.RounderMock{} rounder.RoundIndex = 2 - forkDetector, _ := sync.NewShardForkDetector(rounder, &mock.BlackListHandlerStub{}, 0) + forkDetector, _ := sync.NewShardForkDetector( + rounder, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) shardCoordinator := mock.NewOneShardCoordinatorMock() account := &mock.AccountsStub{} @@ -1952,17 +1780,17 @@ func TestBootstrap_ShouldSyncShouldReturnTrueWhenForkIsDetectedAndItReceivesTheS &mock.RequestedItemsHandlerStub{}, ) - _ = forkDetector.AddHeader(&hdr1, hash1, process.BHProcessed, nil, nil, false) - _ = forkDetector.AddHeader(&hdr2, hash2, process.BHNotarized, finalHeaders, finalHeadersHashes, false) + _ = forkDetector.AddHeader(&hdr1, hash1, process.BHProcessed, nil, nil) + _ = forkDetector.AddHeader(&hdr2, hash2, process.BHNotarized, selfNotarizedHeaders, selfNotarizedHeadersHashes) shouldSync := bs.ShouldSync() assert.True(t, shouldSync) assert.True(t, bs.IsForkDetected()) if shouldSync && bs.IsForkDetected() { - forkDetector.RemoveHeaders(hdr1.GetNonce(), hash1) - bs.ReceivedHeaders(hash1) - _ = forkDetector.AddHeader(&hdr1, hash1, process.BHProcessed, nil, nil, false) + forkDetector.RemoveHeader(hdr1.GetNonce(), hash1) + bs.ReceivedHeaders(&hdr1, hash1) + _ = forkDetector.AddHeader(&hdr1, hash1, process.BHProcessed, nil, nil) } shouldSync = bs.ShouldSync() @@ -1984,23 +1812,41 @@ func TestBootstrap_ShouldSyncShouldReturnFalseWhenForkIsDetectedAndItReceivesThe return &hdr2 } - finalHeaders := []data.HeaderHandler{ + selfNotarizedHeaders := []data.HeaderHandler{ &hdr2, } - finalHeadersHashes := [][]byte{ + selfNotarizedHeadersHashes := [][]byte{ hash2, } pools := createMockPools() - pools.HeadersCalled = func() storage.Cacher { - return sync.GetCacherWithHeaders(&hdr1, &hdr2, hash1, hash2) + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{ + RegisterHandlerCalled: func(func(header data.HeaderHandler, key []byte)) {}, + GetHeaderByHashCalled: func(key []byte) (handler data.HeaderHandler, e error) { + if bytes.Equal(key, hash1) { + return &hdr1, nil + } + if bytes.Equal(key, hash2) { + return &hdr2, nil + } + + return nil, errors.New("err") + }, + } + return sds } hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} rounder := 
&mock.RounderMock{} rounder.RoundIndex = 2 - forkDetector, _ := sync.NewShardForkDetector(rounder, &mock.BlackListHandlerStub{}, 0) + forkDetector, _ := sync.NewShardForkDetector( + rounder, + &mock.BlackListHandlerStub{}, + &mock.BlockTrackerMock{}, + 0, + ) shardCoordinator := mock.NewOneShardCoordinatorMock() account := &mock.AccountsStub{} @@ -2028,17 +1874,17 @@ func TestBootstrap_ShouldSyncShouldReturnFalseWhenForkIsDetectedAndItReceivesThe &mock.RequestedItemsHandlerStub{}, ) - _ = forkDetector.AddHeader(&hdr1, hash1, process.BHProcessed, nil, nil, false) - _ = forkDetector.AddHeader(&hdr2, hash2, process.BHNotarized, finalHeaders, finalHeadersHashes, false) + _ = forkDetector.AddHeader(&hdr1, hash1, process.BHProcessed, nil, nil) + _ = forkDetector.AddHeader(&hdr2, hash2, process.BHNotarized, selfNotarizedHeaders, selfNotarizedHeadersHashes) shouldSync := bs.ShouldSync() assert.True(t, shouldSync) assert.True(t, bs.IsForkDetected()) if shouldSync && bs.IsForkDetected() { - forkDetector.RemoveHeaders(hdr1.GetNonce(), hash1) - bs.ReceivedHeaders(hash2) - _ = forkDetector.AddHeader(&hdr2, hash2, process.BHProcessed, finalHeaders, finalHeadersHashes, false) + forkDetector.RemoveHeader(hdr1.GetNonce(), hash1) + bs.ReceivedHeaders(&hdr2, hash2) + _ = forkDetector.AddHeader(&hdr2, hash2, process.BHProcessed, selfNotarizedHeaders, selfNotarizedHeadersHashes) } shouldSync = bs.ShouldSync() @@ -2082,7 +1928,7 @@ func TestBootstrap_GetHeaderFromPoolShouldReturnNil(t *testing.T) { &mock.RequestedItemsHandlerStub{}, ) - hdr, _, _ := process.GetShardHeaderFromPoolWithNonce(0, 0, pools.Headers(), pools.HeadersNonces()) + hdr, _, _ := process.GetShardHeaderFromPoolWithNonce(0, 0, pools.Headers()) assert.NotNil(t, bs) assert.Nil(t, hdr) } @@ -2091,42 +1937,25 @@ func TestBootstrap_GetHeaderFromPoolShouldReturnHeader(t *testing.T) { t.Parallel() hdr := &block.Header{Nonce: 0} - - shardId := uint32(0) hash := []byte("aaa") pools := createMockPools() - pools.HeadersCalled = func() storage.Cacher { - sds := &mock.CacherStub{} + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} - sds.PeekCalled = func(key []byte) (value interface{}, ok bool) { - if bytes.Equal(hash, key) { - return hdr, true + sds.GetHeaderByNonceAndShardIdCalled = func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + if hdrNonce == 0 { + return []data.HeaderHandler{hdr}, [][]byte{hash}, nil } - return nil, false + return nil, nil, errors.New("err") } - sds.RegisterHandlerCalled = func(func(key []byte)) { + sds.RegisterHandlerCalled = func(func(header data.HeaderHandler, key []byte)) { } return sds } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - hnc := &mock.Uint64SyncMapCacherStub{} - hnc.RegisterHandlerCalled = func(handler func(nonce uint64, shardId uint32, hash []byte)) {} - hnc.GetCalled = func(u uint64) (dataRetriever.ShardIdHashMap, bool) { - if u == 0 { - syncMap := &dataPool.ShardIdHashSyncMap{} - syncMap.Store(shardId, hash) - return syncMap, true - } - - return nil, false - } - - return hnc - } hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} @@ -2156,7 +1985,7 @@ func TestBootstrap_GetHeaderFromPoolShouldReturnHeader(t *testing.T) { &mock.RequestedItemsHandlerStub{}, ) - hdr2, _, _ := process.GetShardHeaderFromPoolWithNonce(0, 0, pools.Headers(), pools.HeadersNonces()) + hdr2, _, _ := process.GetShardHeaderFromPoolWithNonce(0, 0, pools.Headers()) 
assert.NotNil(t, bs) assert.True(t, hdr == hdr2) } @@ -2225,17 +2054,18 @@ func TestBootstrap_ReceivedHeadersFoundInPoolShouldAddToForkDetector(t *testing. addedHdr := &block.Header{} pools := createMockPools() - pools.HeadersCalled = func() storage.Cacher { - sds := &mock.CacherStub{} - sds.RegisterHandlerCalled = func(func(key []byte)) { + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} + sds.RegisterHandlerCalled = func(func(header data.HeaderHandler, key []byte)) { } - sds.PeekCalled = func(key []byte) (value interface{}, ok bool) { + sds.GetHeaderByHashCalled = func(key []byte) (handler data.HeaderHandler, e error) { if bytes.Equal(key, addedHash) { - return addedHdr, true + return addedHdr, nil } - return nil, false + return nil, errors.New("err") } + return sds } @@ -2243,7 +2073,7 @@ func TestBootstrap_ReceivedHeadersFoundInPoolShouldAddToForkDetector(t *testing. hasher := &mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} forkDetector := &mock.ForkDetectorMock{} - forkDetector.AddHeaderCalled = func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error { + forkDetector.AddHeaderCalled = func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, selfNotarizedHeaders []data.HeaderHandler, selfNotarizedHeadersHashes [][]byte) error { if state == process.BHProcessed { return errors.New("processed") } @@ -2259,6 +2089,9 @@ func TestBootstrap_ReceivedHeadersFoundInPoolShouldAddToForkDetector(t *testing. wasAdded = true return nil } + forkDetector.ProbableHighestNonceCalled = func() uint64 { + return 0 + } shardCoordinator := mock.NewOneShardCoordinatorMock() account := &mock.AccountsStub{} @@ -2285,92 +2118,11 @@ func TestBootstrap_ReceivedHeadersFoundInPoolShouldAddToForkDetector(t *testing. 
&mock.RequestedItemsHandlerStub{}, ) - bs.ReceivedHeaders(addedHash) + bs.ReceivedHeaders(addedHdr, addedHash) assert.True(t, wasAdded) } -func TestBootstrap_ReceivedHeadersNotFoundInPoolShouldNotAddToForkDetector(t *testing.T) { - t.Parallel() - - addedHash := []byte("hash") - addedHdr := &block.Header{} - - pools := createMockPools() - - wasAdded := false - hasher := &mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} - forkDetector := &mock.ForkDetectorMock{} - forkDetector.AddHeaderCalled = func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte, isNotarizedShardStuck bool) error { - if state == process.BHProcessed { - return errors.New("processed") - } - - if !bytes.Equal(hash, addedHash) { - return errors.New("hash mismatch") - } - - if !reflect.DeepEqual(header, addedHdr) { - return errors.New("header mismatch") - } - - wasAdded = true - return nil - } - - shardCoordinator := mock.NewOneShardCoordinatorMock() - account := &mock.AccountsStub{} - - headerStorage := &mock.StorerStub{} - headerStorage.GetCalled = func(key []byte) (i []byte, e error) { - if bytes.Equal(key, addedHash) { - buff, _ := marshalizer.Marshal(addedHdr) - - return buff, nil - } - - return nil, nil - } - - store := createFullStore() - store.AddStorer(dataRetriever.BlockHeaderUnit, headerStorage) - - blkc, _ := blockchain.NewBlockChain( - &mock.CacherStub{}, - ) - - _ = blkc.SetAppStatusHandler(&mock.AppStatusHandlerStub{ - SetUInt64ValueHandler: func(key string, value uint64) {}, - }) - - rnd, _ := round.NewRound(time.Now(), time.Now(), 100*time.Millisecond, &mock.SyncTimerMock{}) - - bs, _ := sync.NewShardBootstrap( - pools, - store, - blkc, - rnd, - &mock.BlockProcessorMock{}, - waitTime, - hasher, - marshalizer, - forkDetector, - createMockResolversFinder(), - shardCoordinator, - account, - &mock.BlackListHandlerStub{}, - &mock.NetworkConnectionWatcherStub{}, - &mock.BoostrapStorerMock{}, - &mock.StorageBootstrapperMock{}, - &mock.RequestedItemsHandlerStub{}, - ) - - bs.ReceivedHeaders(addedHash) - - assert.False(t, wasAdded) -} - //------- RollBack func TestBootstrap_RollBackNilBlockchainHeaderShouldErr(t *testing.T) { @@ -2458,20 +2210,17 @@ func TestBootstrap_RollBackIsNotEmptyShouldErr(t *testing.T) { newHdrNonce := uint64(6) remFlags := &removedFlags{} - shardId := uint32(0) pools := createMockPools() - pools.HeadersCalled = func() storage.Cacher { - return createHeadersDataPool(newHdrHash, remFlags) - } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - return createHeadersNoncesDataPool( - newHdrNonce, - newHdrHash, - newHdrNonce, - remFlags, - shardId, - ) + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{ + RemoveHeaderByHashCalled: func(key []byte) { + if bytes.Equal(key, newHdrHash) { + remFlags.flagHdrRemovedFromHeaders = true + } + }, + } + return sds } blkc := initBlockchain() rnd := &mock.RounderMock{} @@ -2518,8 +2267,6 @@ func TestBootstrap_RollBackIsEmptyCallRollBackOneBlockOkValsShouldWork(t *testin //retain if the remove process from different storage locations has been called remFlags := &removedFlags{} - shardId := uint32(0) - currentHdrNonce := uint64(8) currentHdrHash := []byte("current header hash") @@ -2541,18 +2288,15 @@ func TestBootstrap_RollBackIsEmptyCallRollBackOneBlockOkValsShouldWork(t *testin pools := createMockPools() //data pool headers - pools.HeadersCalled = func() storage.Cacher { - return 
createHeadersDataPool(currentHdrHash, remFlags) - } - //data pool headers-nonces - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - return createHeadersNoncesDataPool( - currentHdrNonce, - currentHdrHash, - currentHdrNonce, - remFlags, - shardId, - ) + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{ + RemoveHeaderByHashCalled: func(key []byte) { + if bytes.Equal(key, currentHdrHash) { + remFlags.flagHdrRemovedFromHeaders = true + } + }, + } + return sds } //a mock blockchain with special header and tx block bodies stubs (defined above) @@ -2676,8 +2420,7 @@ func TestBootstrap_RollBackIsEmptyCallRollBackOneBlockOkValsShouldWork(t *testin err := bs.RollBack(true) assert.Nil(t, err) - assert.True(t, remFlags.flagHdrRemovedFromNonces) - assert.False(t, remFlags.flagHdrRemovedFromHeaders) + assert.True(t, remFlags.flagHdrRemovedFromHeaders) assert.True(t, remFlags.flagHdrRemovedFromStorage) assert.True(t, remFlags.flagHdrRemovedFromForkDetector) assert.Equal(t, blkc.GetCurrentBlockHeader(), prevHdr) @@ -2690,7 +2433,6 @@ func TestBootstrap_RollbackIsEmptyCallRollBackOneBlockToGenesisShouldWork(t *tes //retain if the remove process from different storage locations has been called remFlags := &removedFlags{} - shardId := uint32(0) currentHdrNonce := uint64(1) currentHdrHash := []byte("current header hash") @@ -2713,18 +2455,15 @@ func TestBootstrap_RollbackIsEmptyCallRollBackOneBlockToGenesisShouldWork(t *tes pools := createMockPools() //data pool headers - pools.HeadersCalled = func() storage.Cacher { - return createHeadersDataPool(currentHdrHash, remFlags) - } - //data pool headers-nonces - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - return createHeadersNoncesDataPool( - currentHdrNonce, - currentHdrHash, - currentHdrNonce, - remFlags, - shardId, - ) + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{ + RemoveHeaderByHashCalled: func(key []byte) { + if bytes.Equal(key, currentHdrHash) { + remFlags.flagHdrRemovedFromHeaders = true + } + }, + } + return sds } //a mock blockchain with special header and tx block bodies stubs (defined above) @@ -2850,8 +2589,7 @@ func TestBootstrap_RollbackIsEmptyCallRollBackOneBlockToGenesisShouldWork(t *tes err := bs.RollBack(true) assert.Nil(t, err) - assert.True(t, remFlags.flagHdrRemovedFromNonces) - assert.False(t, remFlags.flagHdrRemovedFromHeaders) + assert.True(t, remFlags.flagHdrRemovedFromHeaders) assert.True(t, remFlags.flagHdrRemovedFromStorage) assert.True(t, remFlags.flagHdrRemovedFromForkDetector) assert.Nil(t, blkc.GetCurrentBlockHeader()) @@ -3163,25 +2901,18 @@ func TestShardBootstrap_SetStatusHandlerNilHandlerShouldErr(t *testing.T) { t.Parallel() pools := &mock.PoolsHolderStub{} - pools.HeadersCalled = func() storage.Cacher { - sds := &mock.CacherStub{} + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} - sds.HasOrAddCalled = func(key []byte, value interface{}) (ok, evicted bool) { + sds.AddCalled = func(headerHash []byte, header data.HeaderHandler) { assert.Fail(t, "should have not reached this point") - return false, false } - sds.RegisterHandlerCalled = func(func(key []byte)) { + sds.RegisterHandlerCalled = func(func(header data.HeaderHandler, key []byte)) { } return sds } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - hnc := &mock.Uint64SyncMapCacherStub{} - hnc.RegisterHandlerCalled = func(handler func(nonce uint64, shardId uint32, 
hash []byte)) {} - - return hnc - } pools.MiniBlocksCalled = func() storage.Cacher { cs := &mock.CacherStub{} cs.RegisterHandlerCalled = func(i func(key []byte)) {} @@ -3227,29 +2958,24 @@ func TestShardBootstrap_RequestMiniBlocksFromHeaderWithNonceIfMissing(t *testing t.Parallel() requestDataWasCalled := false + hdrHash := []byte("hash") + hdr := &block.Header{Round: 5, Nonce: 1} pools := &mock.PoolsHolderStub{} - pools.HeadersCalled = func() storage.Cacher { - sds := &mock.CacherStub{} - sds.RegisterHandlerCalled = func(func(key []byte)) { + pools.HeadersCalled = func() dataRetriever.HeadersPool { + sds := &mock.HeadersCacherStub{} + sds.RegisterHandlerCalled = func(func(header data.HeaderHandler, key []byte)) { } - - sds.PeekCalled = func(key []byte) (interface{}, bool) { - hdr := block.Header{Round: 5} - return &hdr, true + sds.GetHeaderByNonceAndShardIdCalled = func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + return []data.HeaderHandler{hdr}, [][]byte{[]byte("hash")}, nil } - return sds - } - pools.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { - hnc := &mock.Uint64SyncMapCacherStub{ - GetCalled: func(nonce uint64) (dataRetriever.ShardIdHashMap, bool) { - shIdSyncMap := dataPool.ShardIdHashSyncMap{} - shIdSyncMap.Store(uint32(0), []byte("hash")) - return &shIdSyncMap, true - }, + sds.GetHeaderByHashCalled = func(hash []byte) (handler data.HeaderHandler, err error) { + if bytes.Equal(hash, hdrHash) { + return hdr, nil + } + return nil, nil } - hnc.RegisterHandlerCalled = func(handler func(nonce uint64, shardId uint32, hash []byte)) {} - return hnc + return sds } pools.MiniBlocksCalled = func() storage.Cacher { @@ -3274,7 +3000,7 @@ func TestShardBootstrap_RequestMiniBlocksFromHeaderWithNonceIfMissing(t *testing } resFinder := createMockResolversFinderNilMiniBlocks() resFinder.IntraShardResolverCalled = func(baseTopic string) (resolver dataRetriever.Resolver, e error) { - if strings.Contains(baseTopic, factory.HeadersTopic) { + if strings.Contains(baseTopic, factory.ShardBlocksTopic) { return &mock.HeaderResolverMock{ RequestDataFromHashCalled: func(hash []byte) error { return nil @@ -3343,6 +3069,6 @@ func TestShardBootstrap_RequestMiniBlocksFromHeaderWithNonceIfMissing(t *testing &mock.RequestedItemsHandlerStub{}, ) - bs.RequestMiniBlocksFromHeaderWithNonceIfMissing(uint32(0), uint64(1)) + bs.RequestMiniBlocksFromHeaderWithNonceIfMissing(hdr) assert.True(t, requestDataWasCalled) } diff --git a/process/sync/storageBootstrap/baseStorageBootstrapper.go b/process/sync/storageBootstrap/baseStorageBootstrapper.go index fa2512f52a9..07ca6aece12 100644 --- a/process/sync/storageBootstrap/baseStorageBootstrapper.go +++ b/process/sync/storageBootstrap/baseStorageBootstrapper.go @@ -9,6 +9,7 @@ import ( "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" + "github.com/ElrondNetwork/elrond-go/process/block/processedMb" "github.com/ElrondNetwork/elrond-go/process/sync" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" @@ -28,6 +29,7 @@ type ArgsStorageBootstrapper struct { BootstrapRoundIndex uint64 ShardCoordinator sharding.Coordinator ResolversFinder dataRetriever.ResolversFinder + BlockTracker process.BlockTracker } type storageBootstrapper struct { @@ -39,6 +41,7 @@ type storageBootstrapper struct { store dataRetriever.StorageService uint64Converter 
typeConverters.Uint64ByteSliceConverter shardCoordinator sharding.Coordinator + blockTracker process.BlockTracker bootstrapRoundIndex uint64 bootstrapper storageBootstrapperHandler @@ -60,6 +63,12 @@ func (st *storageBootstrapper) loadBlocks() error { if err != nil { break } + + if round == headerInfo.LastRound { + err = sync.ErrCorruptBootstrapFromStorageDb + break + } + storageHeadersInfo = append(storageHeadersInfo, headerInfo) if uint64(round) > st.bootstrapRoundIndex { @@ -89,13 +98,17 @@ func (st *storageBootstrapper) loadBlocks() error { } if err != nil { + log.Warn("bootstrapper", "error", err) st.restoreBlockChainToGenesis() - _ = st.bootStorer.SaveLastRound(0) + err = st.bootStorer.SaveLastRound(0) + log.LogIfError(err, "bootstorer") + return process.ErrNotEnoughValidBlocksInStorage } - processedMiniBlocks := process.ConvertSliceToProcessedMiniBlocksMap(headerInfo.ProcessedMiniBlocks) - st.displayProcessedMiniBlocks(processedMiniBlocks) + processedMiniBlocks := processedMb.NewProcessedMiniBlocks() + processedMiniBlocks.ConvertSliceToProcessedMiniBlocksMap(headerInfo.ProcessedMiniBlocks) + processedMiniBlocks.DisplayProcessedMiniBlocks() st.blkExecutor.ApplyProcessedMiniBlocks(processedMiniBlocks) @@ -145,17 +158,19 @@ func (st *storageBootstrapper) applyHeaderInfo(hdrInfo bootstrapStorage.Bootstra } func (st *storageBootstrapper) getBootInfos(hdrInfo bootstrapStorage.BootstrapData) ([]bootstrapStorage.BootstrapData, error) { - highestFinalNonce := hdrInfo.HighestFinalNonce - highestNonce := hdrInfo.LastHeader.Nonce + highestFinalBlockNonce := hdrInfo.HighestFinalBlockNonce + highestBlockNonce := hdrInfo.LastHeader.Nonce lastRound := hdrInfo.LastRound bootInfos := []bootstrapStorage.BootstrapData{hdrInfo} log.Debug("block info from storage", - "highest nonce", highestNonce, "lastFinalNone", highestFinalNonce, "last round", lastRound) + "highest block nonce", highestBlockNonce, + "highest final block nonce", highestFinalBlockNonce, + "last round", lastRound) - lowestNonce := core.MaxUint64(highestFinalNonce-1, 1) - for highestNonce > lowestNonce { + lowestNonce := core.MaxUint64(highestFinalBlockNonce-1, 1) + for highestBlockNonce > lowestNonce { strHdrI, err := st.bootStorer.Get(lastRound) if err != nil { log.Debug("cannot load header info from storage ", "error", err.Error()) @@ -163,7 +178,7 @@ func (st *storageBootstrapper) getBootInfos(hdrInfo bootstrapStorage.BootstrapDa } bootInfos = append(bootInfos, strHdrI) - highestNonce = strHdrI.LastHeader.Nonce + highestBlockNonce = strHdrI.LastHeader.Nonce lastRound = strHdrI.LastRound if lastRound == 0 { @@ -179,45 +194,60 @@ func (st *storageBootstrapper) applyBootInfos(bootInfos []bootstrapStorage.Boots defer func() { if err != nil { - st.blkExecutor.RestoreLastNotarizedHrdsToGenesis() - st.forkDetector.RestoreFinalCheckPointToGenesis() + st.forkDetector.RestoreToGenesis() + st.blockTracker.RestoreToGenesis() } }() for i := len(bootInfos) - 1; i >= 0; i-- { - log.Debug("apply block", - "nonce", bootInfos[i].LastHeader.Nonce, - "shardId", bootInfos[i].LastHeader.ShardId) - - lastNotarized := make(map[uint32]*sync.HdrInfo, len(bootInfos[i].LastNotarizedHeaders)) - for _, lastNotarizedHeader := range bootInfos[i].LastNotarizedHeaders { - log.Debug("added notarized header", - "nonce", lastNotarizedHeader.Nonce, - "shardId", lastNotarizedHeader.ShardId) - - lastNotarized[lastNotarizedHeader.ShardId] = &sync.HdrInfo{ - Nonce: lastNotarizedHeader.Nonce, - Hash: lastNotarizedHeader.Hash, - } - } + log.Debug("apply header", + "shard", 
bootInfos[i].LastHeader.ShardId, + "nonce", bootInfos[i].LastHeader.Nonce) - err = st.bootstrapper.applyNotarizedBlocks(lastNotarized) + err = st.bootstrapper.applyCrossNotarizedHeaders(bootInfos[i].LastCrossNotarizedHeaders) if err != nil { - log.Debug("cannot apply notarized block", "error", err.Error()) + log.Debug("cannot apply cross notarized headers", "error", err.Error()) + return err + } + + selfNotarizedHeadersHashes := make([][]byte, len(bootInfos[i].LastSelfNotarizedHeaders)) + for index, selfNotarizedHeader := range bootInfos[i].LastSelfNotarizedHeaders { + selfNotarizedHeadersHashes[index] = selfNotarizedHeader.Hash + } + selfNotarizedHeaders, err := st.bootstrapper.applySelfNotarizedHeaders(selfNotarizedHeadersHashes) + if err != nil { + log.Debug("cannot apply self notarized headers", "error", err.Error()) return err } - lastFinalHashes := make([][]byte, 0, len(bootInfos[i].LastFinals)) - for _, lastFinal := range bootInfos[i].LastFinals { - lastFinalHashes = append(lastFinalHashes, lastFinal.Hash) + header, err := st.bootstrapper.getHeader(bootInfos[i].LastHeader.Hash) + if err != nil { + return err } - err = st.addHeaderToForkDetector(bootInfos[i].LastHeader.Hash, lastFinalHashes) + log.Debug("add header to fork detector", + "shard", header.GetShardID(), + "round", header.GetRound(), + "nonce", header.GetNonce(), + "hash", bootInfos[i].LastHeader.Hash) + + err = st.forkDetector.AddHeader(header, bootInfos[i].LastHeader.Hash, process.BHProcessed, selfNotarizedHeaders, selfNotarizedHeadersHashes) if err != nil { - log.Debug("cannot apply final block", "error", err.Error()) return err } + + if i > 0 { + log.Debug("added self notarized header in block tracker", + "shard", header.GetShardID(), + "round", header.GetRound(), + "nonce", header.GetNonce(), + "hash", bootInfos[i].LastHeader.Hash) + + st.blockTracker.AddSelfNotarizedHeader(st.shardCoordinator.SelfId(), header, bootInfos[i].LastHeader.Hash) + } + + st.blockTracker.AddTrackedHeader(header, bootInfos[i].LastHeader.Hash) } return nil @@ -243,19 +273,6 @@ func (st *storageBootstrapper) cleanupStorage(headerInfo bootstrapStorage.Bootst "hash", headerInfo.Hash) } -func (st *storageBootstrapper) displayProcessedMiniBlocks(processedMiniBlocks map[string]map[string]struct{}) { - log.Debug("processed mini blocks applied") - - for metaBlockHash, miniBlocksHashes := range processedMiniBlocks { - log.Debug("processed", - "meta hash", []byte(metaBlockHash)) - for miniBlockHash := range miniBlocksHashes { - log.Debug("processed", - "mini block hash", []byte(miniBlockHash)) - } - } -} - func (st *storageBootstrapper) applyBlock(header data.HeaderHandler, headerHash []byte) error { blockBody, err := st.bootstrapper.getBlockBody(header) if err != nil { @@ -277,34 +294,6 @@ func (st *storageBootstrapper) applyBlock(header data.HeaderHandler, headerHash return nil } -func (st *storageBootstrapper) addHeaderToForkDetector(headerHash []byte, finalHeadersHashes [][]byte) error { - header, err := st.bootstrapper.getHeader(headerHash) - if err != nil { - return err - } - - log.Debug("added header to fork detector", - "nonce", header.GetNonce(), - "shardId", header.GetShardID()) - - finalHeaders := make([]data.HeaderHandler, 0, len(finalHeadersHashes)) - for _, hash := range finalHeadersHashes { - finalHeader, err := st.bootstrapper.getHeader(hash) - if err != nil { - return err - } - finalHeaders = append(finalHeaders, finalHeader) - log.Debug("added final header", "nonce", finalHeader.GetNonce()) - } - - err = 
st.forkDetector.AddHeader(header, headerHash, process.BHProcessed, finalHeaders, finalHeadersHashes, false) - if err != nil { - return err - } - - return nil -} - func (st *storageBootstrapper) restoreBlockChainToGenesis() { genesisHeader := st.blkc.GetGenesisHeader() err := st.blkExecutor.RevertStateToBlock(genesisHeader) diff --git a/process/sync/storageBootstrap/interface.go b/process/sync/storageBootstrap/interface.go index 3538090d3c7..c53be3d2fe4 100644 --- a/process/sync/storageBootstrap/interface.go +++ b/process/sync/storageBootstrap/interface.go @@ -2,14 +2,15 @@ package storageBootstrap import ( "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/process/sync" + "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" ) // StorageBootstrapper is the main interface for bootstrap from storage execution engine type storageBootstrapperHandler interface { getHeader(hash []byte) (data.HeaderHandler, error) getBlockBody(header data.HeaderHandler) (data.BodyHandler, error) - applyNotarizedBlocks(lastNotarized map[uint32]*sync.HdrInfo) error + applyCrossNotarizedHeaders(crossNotarizedHeaders []bootstrapStorage.BootstrapHeaderInfo) error + applySelfNotarizedHeaders(selfNotarizedHeadersHashes [][]byte) ([]data.HeaderHandler, error) cleanupNotarizedStorage(hash []byte) IsInterfaceNil() bool } diff --git a/process/sync/storageBootstrap/metaStorageBootstrapper.go b/process/sync/storageBootstrap/metaStorageBootstrapper.go index eccaabaa264..6f7c4bef439 100644 --- a/process/sync/storageBootstrap/metaStorageBootstrapper.go +++ b/process/sync/storageBootstrap/metaStorageBootstrapper.go @@ -5,7 +5,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/sync" + "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" ) type metaStorageBootstrapper struct { @@ -22,6 +22,7 @@ func NewMetaStorageBootstrapper(arguments ArgsStorageBootstrapper) (*metaStorage marshalizer: arguments.Marshalizer, store: arguments.Store, shardCoordinator: arguments.ShardCoordinator, + blockTracker: arguments.BlockTracker, uint64Converter: arguments.Uint64Converter, bootstrapRoundIndex: arguments.BootstrapRoundIndex, @@ -47,23 +48,21 @@ func (msb *metaStorageBootstrapper) IsInterfaceNil() bool { return msb == nil } -func (msb *metaStorageBootstrapper) applyNotarizedBlocks( - lastNotarized map[uint32]*sync.HdrInfo, -) error { - for i := uint32(0); i < msb.shardCoordinator.NumberOfShards(); i++ { - if lastNotarized[i] == nil { - continue - } - if lastNotarized[i].Hash == nil { - return sync.ErrNilHash - } - - headerHandler, err := process.GetShardHeaderFromStorage(lastNotarized[i].Hash, msb.marshalizer, msb.store) +func (msb *metaStorageBootstrapper) applyCrossNotarizedHeaders(crossNotarizedHeaders []bootstrapStorage.BootstrapHeaderInfo) error { + for _, crossNotarizedHeader := range crossNotarizedHeaders { + header, err := process.GetShardHeaderFromStorage(crossNotarizedHeader.Hash, msb.marshalizer, msb.store) if err != nil { return err } - msb.blkExecutor.AddLastNotarizedHdr(i, headerHandler) + log.Debug("added cross notarized header in block tracker", + "shard", crossNotarizedHeader.ShardId, + "round", header.GetRound(), + "nonce", header.GetNonce(), + "hash", crossNotarizedHeader.Hash) + + msb.blockTracker.AddCrossNotarizedHeader(crossNotarizedHeader.ShardId, header, crossNotarizedHeader.Hash) + 
msb.blockTracker.AddTrackedHeader(header, crossNotarizedHeader.Hash) } return nil @@ -118,3 +117,8 @@ func (msb *metaStorageBootstrapper) cleanupNotarizedStorage(metaBlockHash []byte } } } + +func (msb *metaStorageBootstrapper) applySelfNotarizedHeaders(selfNotarizedHeadersHashes [][]byte) ([]data.HeaderHandler, error) { + selfNotarizedHeaders := make([]data.HeaderHandler, 0) + return selfNotarizedHeaders, nil +} diff --git a/process/sync/storageBootstrap/shardStorageBootstrapper.go b/process/sync/storageBootstrap/shardStorageBootstrapper.go index 3004e25e88d..5ef6c4d6c1f 100644 --- a/process/sync/storageBootstrap/shardStorageBootstrapper.go +++ b/process/sync/storageBootstrap/shardStorageBootstrapper.go @@ -5,8 +5,8 @@ import ( "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" "github.com/ElrondNetwork/elrond-go/process/factory" - "github.com/ElrondNetwork/elrond-go/process/sync" "github.com/ElrondNetwork/elrond-go/sharding" ) @@ -25,6 +25,7 @@ func NewShardStorageBootstrapper(arguments ArgsStorageBootstrapper) (*shardStora marshalizer: arguments.Marshalizer, store: arguments.Store, shardCoordinator: arguments.ShardCoordinator, + blockTracker: arguments.BlockTracker, uint64Converter: arguments.Uint64Converter, bootstrapRoundIndex: arguments.BootstrapRoundIndex, @@ -85,26 +86,26 @@ func (ssb *shardStorageBootstrapper) getBlockBody(headerHandler data.HeaderHandl return block.Body(miniBlocks), nil } -func (ssb *shardStorageBootstrapper) applyNotarizedBlocks( - lastNotarized map[uint32]*sync.HdrInfo, -) error { - if len(lastNotarized) == 0 { - return nil - } +func (ssb *shardStorageBootstrapper) applyCrossNotarizedHeaders(crossNotarizedHeaders []bootstrapStorage.BootstrapHeaderInfo) error { + for _, crossNotarizedHeader := range crossNotarizedHeaders { + if crossNotarizedHeader.ShardId != sharding.MetachainShardId { + continue + } - if lastNotarized[sharding.MetachainShardId] == nil { - return sync.ErrNilNotarizedHeader - } - if lastNotarized[sharding.MetachainShardId].Hash == nil { - return sync.ErrNilHash - } + metaBlock, err := process.GetMetaHeaderFromStorage(crossNotarizedHeader.Hash, ssb.marshalizer, ssb.store) + if err != nil { + return err + } - metaBlock, err := process.GetMetaHeaderFromStorage(lastNotarized[sharding.MetachainShardId].Hash, ssb.marshalizer, ssb.store) - if err != nil { - return err - } + log.Debug("added cross notarized header in block tracker", + "shard", sharding.MetachainShardId, + "round", metaBlock.GetRound(), + "nonce", metaBlock.GetNonce(), + "hash", crossNotarizedHeader.Hash) - ssb.blkExecutor.AddLastNotarizedHdr(sharding.MetachainShardId, metaBlock) + ssb.blockTracker.AddCrossNotarizedHeader(sharding.MetachainShardId, metaBlock, crossNotarizedHeader.Hash) + ssb.blockTracker.AddTrackedHeader(metaBlock, crossNotarizedHeader.Hash) + } return nil } @@ -152,3 +153,25 @@ func (ssb *shardStorageBootstrapper) cleanupNotarizedStorage(shardHeaderHash []b } } } + +func (ssb *shardStorageBootstrapper) applySelfNotarizedHeaders(selfNotarizedHeadersHashes [][]byte) ([]data.HeaderHandler, error) { + selfNotarizedHeaders := make([]data.HeaderHandler, 0, len(selfNotarizedHeadersHashes)) + for _, selfNotarizedHeaderHash := range selfNotarizedHeadersHashes { + selfNotarizedHeader, err := ssb.getHeader(selfNotarizedHeaderHash) + if err != nil { + return nil, err + } + + selfNotarizedHeaders = 
append(selfNotarizedHeaders, selfNotarizedHeader) + + log.Debug("added self notarized header in block tracker", + "shard", sharding.MetachainShardId, + "round", selfNotarizedHeader.GetRound(), + "nonce", selfNotarizedHeader.GetNonce(), + "hash", selfNotarizedHeaderHash) + + ssb.blockTracker.AddSelfNotarizedHeader(sharding.MetachainShardId, selfNotarizedHeader, selfNotarizedHeaderHash) + } + + return selfNotarizedHeaders, nil +} diff --git a/process/track/argBlockTrack.go b/process/track/argBlockTrack.go new file mode 100644 index 00000000000..1af467c2d5d --- /dev/null +++ b/process/track/argBlockTrack.go @@ -0,0 +1,38 @@ +package track + +import ( + "github.com/ElrondNetwork/elrond-go/consensus" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// ArgBaseTracker holds all dependencies required by the process data factory in order to create +// new instances of shard/meta block tracker +type ArgBaseTracker struct { + Hasher hashing.Hasher + HeaderValidator process.HeaderConstructionValidator + Marshalizer marshal.Marshalizer + RequestHandler process.RequestHandler + Rounder consensus.Rounder + ShardCoordinator sharding.Coordinator + Store dataRetriever.StorageService + StartHeaders map[uint32]data.HeaderHandler +} + +// ArgShardTracker holds all dependencies required by the process data factory in order to create +// new instances of shard block tracker +type ArgShardTracker struct { + ArgBaseTracker + PoolsHolder dataRetriever.PoolsHolder +} + +// ArgMetaTracker holds all dependencies required by the process data factory in order to create +// new instances of meta block tracker +type ArgMetaTracker struct { + ArgBaseTracker + PoolsHolder dataRetriever.MetaPoolsHolder +} diff --git a/process/track/baseBlockTrack.go b/process/track/baseBlockTrack.go new file mode 100644 index 00000000000..d389bf51ae8 --- /dev/null +++ b/process/track/baseBlockTrack.go @@ -0,0 +1,518 @@ +package track + +import ( + "bytes" + "sort" + "sync" + + "github.com/ElrondNetwork/elrond-go/consensus" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/logger" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +var log = logger.GetOrCreate("process/track") + +type headerInfo struct { + hash []byte + header data.HeaderHandler +} + +type baseBlockTrack struct { + hasher hashing.Hasher + headerValidator process.HeaderConstructionValidator + marshalizer marshal.Marshalizer + rounder consensus.Rounder + shardCoordinator sharding.Coordinator + headersPool dataRetriever.HeadersPool + store dataRetriever.StorageService + + blockProcessor blockProcessorHandler + crossNotarizer blockNotarizerHandler + selfNotarizer blockNotarizerHandler + crossNotarizedHeadersNotifier blockNotifierHandler + selfNotarizedHeadersNotifier blockNotifierHandler + + mutHeaders sync.RWMutex + headers map[uint32]map[uint64][]*headerInfo +} + +func (bbt *baseBlockTrack) receivedHeader(headerHandler data.HeaderHandler, headerHash []byte) { + if headerHandler.GetShardID() == 
sharding.MetachainShardId { + bbt.receivedMetaBlock(headerHandler, headerHash) + return + } + + bbt.receivedShardHeader(headerHandler, headerHash) +} + +func (bbt *baseBlockTrack) receivedShardHeader(headerHandler data.HeaderHandler, shardHeaderHash []byte) { + shardHeader, ok := headerHandler.(*block.Header) + if !ok { + log.Warn("cannot convert data.HeaderHandler in *block.Header") + return + } + + log.Debug("received shard header from network in block tracker", + "shard", shardHeader.GetShardID(), + "round", shardHeader.GetRound(), + "nonce", shardHeader.GetNonce(), + "hash", shardHeaderHash, + ) + + if bbt.isHeaderOutOfRange(shardHeader) { + return + } + + bbt.addHeader(shardHeader, shardHeaderHash) + bbt.blockProcessor.processReceivedHeader(shardHeader) +} + +func (bbt *baseBlockTrack) receivedMetaBlock(headerHandler data.HeaderHandler, metaBlockHash []byte) { + metaBlock, ok := headerHandler.(*block.MetaBlock) + if !ok { + log.Warn("cannot convert data.HeaderHandler in *block.Metablock") + return + } + + log.Debug("received meta block from network in block tracker", + "shard", metaBlock.GetShardID(), + "round", metaBlock.GetRound(), + "nonce", metaBlock.GetNonce(), + "hash", metaBlockHash, + ) + + if bbt.isHeaderOutOfRange(metaBlock) { + return + } + + bbt.addHeader(metaBlock, metaBlockHash) + bbt.blockProcessor.processReceivedHeader(metaBlock) +} + +func (bbt *baseBlockTrack) isHeaderOutOfRange(header data.HeaderHandler) bool { + var lastNotarizedHeaderNonce uint64 + + isHeaderForSelfShard := header.GetShardID() == bbt.shardCoordinator.SelfId() + if isHeaderForSelfShard { + lastNotarizedHeaderNonce = bbt.selfNotarizer.getLastNotarizedHeaderNonce(header.GetShardID()) + } else { + lastNotarizedHeaderNonce = bbt.crossNotarizer.getLastNotarizedHeaderNonce(header.GetShardID()) + } + + if header.GetNonce() > lastNotarizedHeaderNonce+process.MaxNonceDifferences { + log.Debug("received header is out of range", + "received nonce", header.GetNonce(), + "last notarized nonce", lastNotarizedHeaderNonce, + ) + return true + } + + return false +} + +func (bbt *baseBlockTrack) addHeader(header data.HeaderHandler, hash []byte) { + if check.IfNil(header) { + return + } + + bbt.mutHeaders.Lock() + defer bbt.mutHeaders.Unlock() + + shardID := header.GetShardID() + nonce := header.GetNonce() + + headersForShard, ok := bbt.headers[shardID] + if !ok { + headersForShard = make(map[uint64][]*headerInfo) + bbt.headers[shardID] = headersForShard + } + + for _, headerInfo := range headersForShard[nonce] { + if bytes.Equal(headerInfo.hash, hash) { + return + } + } + + headersForShard[nonce] = append(headersForShard[nonce], &headerInfo{hash: hash, header: header}) +} + +// AddCrossNotarizedHeader adds cross notarized header to the tracker lists +func (bbt *baseBlockTrack) AddCrossNotarizedHeader( + shardID uint32, + crossNotarizedHeader data.HeaderHandler, + crossNotarizedHeaderHash []byte, +) { + bbt.crossNotarizer.addNotarizedHeader(shardID, crossNotarizedHeader, crossNotarizedHeaderHash) +} + +// AddSelfNotarizedHeader adds self notarized headers to the tracker lists +func (bbt *baseBlockTrack) AddSelfNotarizedHeader( + shardID uint32, + selfNotarizedHeader data.HeaderHandler, + selfNotarizedHeaderHash []byte, +) { + bbt.selfNotarizer.addNotarizedHeader(shardID, selfNotarizedHeader, selfNotarizedHeaderHash) +} + +// AddTrackedHeader adds tracked headers to the tracker lists +func (bbt *baseBlockTrack) AddTrackedHeader(header data.HeaderHandler, hash []byte) { + bbt.addHeader(header, hash) +} + +// 
CleanupHeadersBehindNonce removes from local pools old headers for a given shard +func (bbt *baseBlockTrack) CleanupHeadersBehindNonce( + shardID uint32, + selfNotarizedNonce uint64, + crossNotarizedNonce uint64, +) { + bbt.selfNotarizer.cleanupNotarizedHeadersBehindNonce(shardID, selfNotarizedNonce) + nonce := selfNotarizedNonce + + if shardID != bbt.shardCoordinator.SelfId() { + bbt.crossNotarizer.cleanupNotarizedHeadersBehindNonce(shardID, crossNotarizedNonce) + nonce = crossNotarizedNonce + } + + bbt.cleanupTrackedHeadersBehindNonce(shardID, nonce) +} + +func (bbt *baseBlockTrack) cleanupTrackedHeadersBehindNonce(shardID uint32, nonce uint64) { + if nonce == 0 { + return + } + + bbt.mutHeaders.Lock() + defer bbt.mutHeaders.Unlock() + + headersForShard, ok := bbt.headers[shardID] + if !ok { + return + } + + for headersNonce := range headersForShard { + if headersNonce < nonce { + delete(headersForShard, headersNonce) + } + } +} + +// ComputeLongestChain returns the longest valid chain for a given shard from a given header +func (bbt *baseBlockTrack) ComputeLongestChain(shardID uint32, header data.HeaderHandler) ([]data.HeaderHandler, [][]byte) { + return bbt.blockProcessor.computeLongestChain(shardID, header) +} + +// ComputeLongestMetaChainFromLastNotarized returns the longest valid chain for metachain from its last cross notarized header +func (bbt *baseBlockTrack) ComputeLongestMetaChainFromLastNotarized() ([]data.HeaderHandler, [][]byte, error) { + lastCrossNotarizedHeader, _, err := bbt.GetLastCrossNotarizedHeader(sharding.MetachainShardId) + if err != nil { + return nil, nil, err + } + + hdrsForShard, hdrsHashesForShard := bbt.ComputeLongestChain(sharding.MetachainShardId, lastCrossNotarizedHeader) + + return hdrsForShard, hdrsHashesForShard, nil +} + +// ComputeLongestShardsChainsFromLastNotarized returns the longest valid chains for all shards from theirs last cross notarized headers +func (bbt *baseBlockTrack) ComputeLongestShardsChainsFromLastNotarized() ([]data.HeaderHandler, [][]byte, map[uint32][]data.HeaderHandler, error) { + hdrsMap := make(map[uint32][]data.HeaderHandler) + hdrsHashesMap := make(map[uint32][][]byte) + + lastCrossNotarizedHeaders, err := bbt.GetLastCrossNotarizedHeadersForAllShards() + if err != nil { + return nil, nil, nil, err + } + + maxHdrLen := 0 + for shardID := uint32(0); shardID < bbt.shardCoordinator.NumberOfShards(); shardID++ { + hdrsForShard, hdrsHashesForShard := bbt.ComputeLongestChain(shardID, lastCrossNotarizedHeaders[shardID]) + + hdrsMap[shardID] = append(hdrsMap[shardID], hdrsForShard...) + hdrsHashesMap[shardID] = append(hdrsHashesMap[shardID], hdrsHashesForShard...) 
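// maxHdrLen, updated just below, is the length of the longest per-shard chain and it
// bounds the round-robin flattening loop that follows: position i of every shard is
// emitted before position i+1 of any shard. For example, with shard 0 yielding headers
// [A1 A2 A3] and shard 1 yielding [B1 B2], orderedHeaders becomes [A1 B1 A2 B2 A3],
// with orderedHeadersHashes following the same order.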
+ + tmpHdrLen := len(hdrsForShard) + if maxHdrLen < tmpHdrLen { + maxHdrLen = tmpHdrLen + } + } + + orderedHeaders := make([]data.HeaderHandler, 0) + orderedHeadersHashes := make([][]byte, 0) + + // copy from map to lists - equality between number of headers per shard + for i := 0; i < maxHdrLen; i++ { + for shardID := uint32(0); shardID < bbt.shardCoordinator.NumberOfShards(); shardID++ { + hdrsForShard := hdrsMap[shardID] + hdrsHashesForShard := hdrsHashesMap[shardID] + if i >= len(hdrsForShard) { + continue + } + + orderedHeaders = append(orderedHeaders, hdrsForShard[i]) + orderedHeadersHashes = append(orderedHeadersHashes, hdrsHashesForShard[i]) + } + } + + return orderedHeaders, orderedHeadersHashes, hdrsMap, nil +} + +// DisplayTrackedHeaders displays tracked headers +func (bbt *baseBlockTrack) DisplayTrackedHeaders() { + for shardID := uint32(0); shardID < bbt.shardCoordinator.NumberOfShards(); shardID++ { + bbt.displayHeadersForShard(shardID) + } + + bbt.displayHeadersForShard(sharding.MetachainShardId) +} + +func (bbt *baseBlockTrack) displayHeadersForShard(shardID uint32) { + bbt.displayTrackedHeadersForShard(shardID, "tracked headers") + bbt.crossNotarizer.displayNotarizedHeaders(shardID, "cross notarized headers") + bbt.selfNotarizer.displayNotarizedHeaders(shardID, "self notarized headers") +} + +func (bbt *baseBlockTrack) displayTrackedHeadersForShard(shardID uint32, message string) { + headers, hashes := bbt.sortHeadersFromNonce(shardID, 0) + shouldNotDisplay := len(headers) == 0 || + len(headers) == 1 && headers[0].GetNonce() == 0 + if shouldNotDisplay { + return + } + + log.Debug(message, + "shard", shardID, + "nb", len(headers)) + + for index, header := range headers { + log.Trace("tracked header info", + "round", header.GetRound(), + "nonce", header.GetNonce(), + "hash", hashes[index]) + } +} + +// GetCrossNotarizedHeader returns a cross notarized header for a given shard with a given offset, behind last cross notarized header +func (bbt *baseBlockTrack) GetCrossNotarizedHeader(shardID uint32, offset uint64) (data.HeaderHandler, []byte, error) { + return bbt.crossNotarizer.getNotarizedHeader(shardID, offset) +} + +// GetLastCrossNotarizedHeader returns last cross notarized header for a given shard +func (bbt *baseBlockTrack) GetLastCrossNotarizedHeader(shardID uint32) (data.HeaderHandler, []byte, error) { + return bbt.crossNotarizer.getLastNotarizedHeader(shardID) +} + +// GetLastCrossNotarizedHeadersForAllShards returns last cross notarized headers for all shards +func (bbt *baseBlockTrack) GetLastCrossNotarizedHeadersForAllShards() (map[uint32]data.HeaderHandler, error) { + lastCrossNotarizedHeaders := make(map[uint32]data.HeaderHandler, bbt.shardCoordinator.NumberOfShards()) + + // save last committed header for verification + for shardID := uint32(0); shardID < bbt.shardCoordinator.NumberOfShards(); shardID++ { + lastCrossNotarizedHeader, _, err := bbt.GetLastCrossNotarizedHeader(shardID) + if err != nil { + return nil, err + } + + lastCrossNotarizedHeaders[shardID] = lastCrossNotarizedHeader + } + + return lastCrossNotarizedHeaders, nil +} + +// GetTrackedHeaders returns tracked headers for a given shard +func (bbt *baseBlockTrack) GetTrackedHeaders(shardID uint32) ([]data.HeaderHandler, [][]byte) { + return bbt.sortHeadersFromNonce(shardID, 0) +} + +// GetTrackedHeadersForAllShards returns tracked headers for all shards +func (bbt *baseBlockTrack) GetTrackedHeadersForAllShards() map[uint32][]data.HeaderHandler { + trackedHeaders := 
make(map[uint32][]data.HeaderHandler) + + for shardID := uint32(0); shardID < bbt.shardCoordinator.NumberOfShards(); shardID++ { + trackedHeadersForShard, _ := bbt.GetTrackedHeaders(shardID) + trackedHeaders[shardID] = append(trackedHeaders[shardID], trackedHeadersForShard...) + } + + return trackedHeaders +} + +func (bbt *baseBlockTrack) sortHeadersFromNonce(shardID uint32, nonce uint64) ([]data.HeaderHandler, [][]byte) { + bbt.mutHeaders.RLock() + defer bbt.mutHeaders.RUnlock() + + headersForShard, ok := bbt.headers[shardID] + if !ok { + return nil, nil + } + + sortedHeadersInfo := make([]*headerInfo, 0) + + for headersNonce, headersInfo := range headersForShard { + if headersNonce < nonce { + continue + } + + sortedHeadersInfo = append(sortedHeadersInfo, headersInfo...) + } + + if len(sortedHeadersInfo) > 1 { + sort.Slice(sortedHeadersInfo, func(i, j int) bool { + return sortedHeadersInfo[i].header.GetNonce() < sortedHeadersInfo[j].header.GetNonce() + }) + } + + headers := make([]data.HeaderHandler, 0) + headersHashes := make([][]byte, 0) + + for _, headerInfo := range sortedHeadersInfo { + headers = append(headers, headerInfo.header) + headersHashes = append(headersHashes, headerInfo.hash) + } + + return headers, headersHashes +} + +// GetTrackedHeadersWithNonce returns tracked headers for a given shard and nonce +func (bbt *baseBlockTrack) GetTrackedHeadersWithNonce(shardID uint32, nonce uint64) ([]data.HeaderHandler, [][]byte) { + bbt.mutHeaders.RLock() + defer bbt.mutHeaders.RUnlock() + + headersForShard, ok := bbt.headers[shardID] + if !ok { + return nil, nil + } + + headersForShardWithNonce, ok := headersForShard[nonce] + if !ok { + return nil, nil + } + + headers := make([]data.HeaderHandler, 0) + headersHashes := make([][]byte, 0) + + for _, headerInfo := range headersForShardWithNonce { + headers = append(headers, headerInfo.header) + headersHashes = append(headersHashes, headerInfo.hash) + } + + return headers, headersHashes +} + +// IsShardStuck returns true if the given shard is stuck +func (bbt *baseBlockTrack) IsShardStuck(shardId uint32) bool { + header := bbt.getLastHeader(shardId) + if check.IfNil(header) { + return false + } + + isShardStuck := bbt.rounder.Index()-int64(header.GetRound()) >= process.MaxRoundsWithoutCommittedBlock + return isShardStuck +} + +func (bbt *baseBlockTrack) getLastHeader(shardID uint32) data.HeaderHandler { + bbt.mutHeaders.RLock() + defer bbt.mutHeaders.RUnlock() + + var lastHeaderForShard data.HeaderHandler + + headersForShard, ok := bbt.headers[shardID] + if !ok { + return lastHeaderForShard + } + + maxRound := uint64(0) + for _, headersInfo := range headersForShard { + for _, headerInfo := range headersInfo { + if headerInfo.header.GetRound() > maxRound { + maxRound = headerInfo.header.GetRound() + lastHeaderForShard = headerInfo.header + } + } + } + + return lastHeaderForShard +} + +// RegisterCrossNotarizedHeadersHandler registers a new handler to be called when cross notarized header is changed +func (bbt *baseBlockTrack) RegisterCrossNotarizedHeadersHandler( + handler func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte), +) { + bbt.crossNotarizedHeadersNotifier.registerHandler(handler) +} + +// RegisterSelfNotarizedHeadersHandler registers a new handler to be called when self notarized header is changed +func (bbt *baseBlockTrack) RegisterSelfNotarizedHeadersHandler( + handler func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte), +) { + 
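// Handlers registered here (and via RegisterCrossNotarizedHeadersHandler above) are
// invoked by the notifier on separate goroutines whenever new notarized headers are
// reported, so a registered handler should be safe to run concurrently.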
bbt.selfNotarizedHeadersNotifier.registerHandler(handler) +} + +// RemoveLastNotarizedHeaders removes last notarized headers from tracker list +func (bbt *baseBlockTrack) RemoveLastNotarizedHeaders() { + bbt.crossNotarizer.removeLastNotarizedHeader() + bbt.selfNotarizer.removeLastNotarizedHeader() +} + +// RestoreToGenesis sets class variables to theirs initial values +func (bbt *baseBlockTrack) RestoreToGenesis() { + bbt.crossNotarizer.restoreNotarizedHeadersToGenesis() + bbt.selfNotarizer.restoreNotarizedHeadersToGenesis() + bbt.restoreTrackedHeadersToGenesis() +} + +func (bbt *baseBlockTrack) restoreTrackedHeadersToGenesis() { + bbt.mutHeaders.Lock() + bbt.headers = make(map[uint32]map[uint64][]*headerInfo) + bbt.mutHeaders.Unlock() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (bbt *baseBlockTrack) IsInterfaceNil() bool { + return bbt == nil +} + +func checkTrackerNilParameters(arguments ArgBaseTracker) error { + if check.IfNil(arguments.Hasher) { + return process.ErrNilHasher + } + if check.IfNil(arguments.HeaderValidator) { + return process.ErrNilHeaderValidator + } + if check.IfNil(arguments.Marshalizer) { + return process.ErrNilMarshalizer + } + if check.IfNil(arguments.RequestHandler) { + return process.ErrNilRequestHandler + } + if check.IfNil(arguments.Rounder) { + return process.ErrNilRounder + } + if check.IfNil(arguments.ShardCoordinator) { + return process.ErrNilShardCoordinator + } + if check.IfNil(arguments.Store) { + return process.ErrNilStorage + } + + return nil +} + +func (bbt *baseBlockTrack) initNotarizedHeaders(startHeaders map[uint32]data.HeaderHandler) error { + err := bbt.crossNotarizer.initNotarizedHeaders(startHeaders) + if err != nil { + return err + } + + err = bbt.selfNotarizer.initNotarizedHeaders(startHeaders) + if err != nil { + return err + } + + return nil +} diff --git a/process/track/baseBlockTrack_test.go b/process/track/baseBlockTrack_test.go new file mode 100644 index 00000000000..4789f2d59f7 --- /dev/null +++ b/process/track/baseBlockTrack_test.go @@ -0,0 +1,3 @@ +package track_test + +//TODO: Should be added units test for base block track diff --git a/process/track/blockNotarizer.go b/process/track/blockNotarizer.go new file mode 100644 index 00000000000..8c24490c04d --- /dev/null +++ b/process/track/blockNotarizer.go @@ -0,0 +1,226 @@ +package track + +import ( + "sort" + "sync" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" +) + +type blockNotarizer struct { + hasher hashing.Hasher + marshalizer marshal.Marshalizer + + mutNotarizedHeaders sync.RWMutex + notarizedHeaders map[uint32][]*headerInfo +} + +// NewBlockNotarizer creates a block notarizer object which implements blockNotarizerHandler interface +func NewBlockNotarizer(hasher hashing.Hasher, marshalizer marshal.Marshalizer) (*blockNotarizer, error) { + if check.IfNil(hasher) { + return nil, process.ErrNilHasher + } + if check.IfNil(marshalizer) { + return nil, process.ErrNilMarshalizer + } + + bn := blockNotarizer{ + hasher: hasher, + marshalizer: marshalizer, + } + + bn.notarizedHeaders = make(map[uint32][]*headerInfo) + + return &bn, nil +} + +func (bn *blockNotarizer) addNotarizedHeader( + shardID uint32, + notarizedHeader data.HeaderHandler, + notarizedHeaderHash []byte, +) { + if check.IfNil(notarizedHeader) { + 
return + } + + bn.mutNotarizedHeaders.Lock() + bn.notarizedHeaders[shardID] = append(bn.notarizedHeaders[shardID], &headerInfo{header: notarizedHeader, hash: notarizedHeaderHash}) + if len(bn.notarizedHeaders[shardID]) > 1 { + sort.Slice(bn.notarizedHeaders[shardID], func(i, j int) bool { + return bn.notarizedHeaders[shardID][i].header.GetNonce() < bn.notarizedHeaders[shardID][j].header.GetNonce() + }) + } + bn.mutNotarizedHeaders.Unlock() +} + +func (bn *blockNotarizer) cleanupNotarizedHeadersBehindNonce(shardID uint32, nonce uint64) { + if nonce == 0 { + return + } + + bn.mutNotarizedHeaders.Lock() + defer bn.mutNotarizedHeaders.Unlock() + + notarizedHeaders, ok := bn.notarizedHeaders[shardID] + if !ok { + return + } + + headersInfo := make([]*headerInfo, 0) + for _, headerInfo := range notarizedHeaders { + if headerInfo.header.GetNonce() < nonce { + continue + } + + headersInfo = append(headersInfo, headerInfo) + } + + bn.notarizedHeaders[shardID] = headersInfo +} + +func (bn *blockNotarizer) displayNotarizedHeaders(shardID uint32, message string) { + bn.mutNotarizedHeaders.RLock() + defer bn.mutNotarizedHeaders.RUnlock() + + notarizedHeaders, ok := bn.notarizedHeaders[shardID] + if !ok { + return + } + + if len(notarizedHeaders) > 1 { + sort.Slice(notarizedHeaders, func(i, j int) bool { + return notarizedHeaders[i].header.GetNonce() < notarizedHeaders[j].header.GetNonce() + }) + } + + shouldNotDisplay := len(notarizedHeaders) == 0 || + len(notarizedHeaders) == 1 && notarizedHeaders[0].header.GetNonce() == 0 + if shouldNotDisplay { + return + } + + log.Debug(message, + "shard", shardID, + "nb", len(notarizedHeaders)) + + for _, headerInfo := range notarizedHeaders { + log.Trace("notarized header info", + "round", headerInfo.header.GetRound(), + "nonce", headerInfo.header.GetNonce(), + "hash", headerInfo.hash) + } +} + +func (bn *blockNotarizer) getLastNotarizedHeader(shardID uint32) (data.HeaderHandler, []byte, error) { + bn.mutNotarizedHeaders.RLock() + defer bn.mutNotarizedHeaders.RUnlock() + + if bn.notarizedHeaders == nil { + return nil, nil, process.ErrNotarizedHeadersSliceIsNil + } + + headerInfo := bn.lastNotarizedHeaderInfo(shardID) + if headerInfo == nil { + return nil, nil, process.ErrNotarizedHeadersSliceForShardIsNil + } + + return headerInfo.header, headerInfo.hash, nil +} + +func (bn *blockNotarizer) getLastNotarizedHeaderNonce(shardID uint32) uint64 { + bn.mutNotarizedHeaders.RLock() + defer bn.mutNotarizedHeaders.RUnlock() + + if bn.notarizedHeaders == nil { + return 0 + } + + headerInfo := bn.lastNotarizedHeaderInfo(shardID) + if headerInfo == nil { + return 0 + } + + return headerInfo.header.GetNonce() +} + +func (bn *blockNotarizer) lastNotarizedHeaderInfo(shardID uint32) *headerInfo { + notarizedHeadersCount := len(bn.notarizedHeaders[shardID]) + if notarizedHeadersCount > 0 { + return bn.notarizedHeaders[shardID][notarizedHeadersCount-1] + } + + return nil +} + +func (bn *blockNotarizer) getNotarizedHeader(shardID uint32, offset uint64) (data.HeaderHandler, []byte, error) { + bn.mutNotarizedHeaders.RLock() + defer bn.mutNotarizedHeaders.RUnlock() + + if bn.notarizedHeaders == nil { + return nil, nil, process.ErrNotarizedHeadersSliceIsNil + } + + headersInfo := bn.notarizedHeaders[shardID] + if headersInfo == nil { + return nil, nil, process.ErrNotarizedHeadersSliceForShardIsNil + } + + notarizedHeadersCount := uint64(len(headersInfo)) + if notarizedHeadersCount <= offset { + return nil, nil, ErrNotarizedHeaderOffsetIsOutOfBound + } + + headerInfo := 
headersInfo[notarizedHeadersCount-offset-1] + + return headerInfo.header, headerInfo.hash, nil +} + +func (bn *blockNotarizer) initNotarizedHeaders(startHeaders map[uint32]data.HeaderHandler) error { + if startHeaders == nil { + return process.ErrNotarizedHeadersSliceIsNil + } + + bn.mutNotarizedHeaders.Lock() + defer bn.mutNotarizedHeaders.Unlock() + + bn.notarizedHeaders = make(map[uint32][]*headerInfo) + + for _, startHeader := range startHeaders { + shardID := startHeader.GetShardID() + startHeaderHash, err := core.CalculateHash(bn.marshalizer, bn.hasher, startHeader) + if err != nil { + return err + } + + bn.notarizedHeaders[shardID] = append(bn.notarizedHeaders[shardID], &headerInfo{header: startHeader, hash: startHeaderHash}) + } + + return nil +} + +func (bn *blockNotarizer) removeLastNotarizedHeader() { + bn.mutNotarizedHeaders.Lock() + for shardID := range bn.notarizedHeaders { + notarizedHeadersCount := len(bn.notarizedHeaders[shardID]) + if notarizedHeadersCount > 1 { + bn.notarizedHeaders[shardID] = bn.notarizedHeaders[shardID][:notarizedHeadersCount-1] + } + } + bn.mutNotarizedHeaders.Unlock() +} + +func (bn *blockNotarizer) restoreNotarizedHeadersToGenesis() { + bn.mutNotarizedHeaders.Lock() + for shardID := range bn.notarizedHeaders { + notarizedHeadersCount := len(bn.notarizedHeaders[shardID]) + if notarizedHeadersCount > 1 { + bn.notarizedHeaders[shardID] = bn.notarizedHeaders[shardID][:1] + } + } + bn.mutNotarizedHeaders.Unlock() +} diff --git a/process/track/blockNotarizer_test.go b/process/track/blockNotarizer_test.go new file mode 100644 index 00000000000..5adfaaf1a8c --- /dev/null +++ b/process/track/blockNotarizer_test.go @@ -0,0 +1,3 @@ +package track_test + +//TODO: Should be added units test for tracked blocks notarizer diff --git a/process/track/blockNotifier.go b/process/track/blockNotifier.go new file mode 100644 index 00000000000..8d5c35c2f26 --- /dev/null +++ b/process/track/blockNotifier.go @@ -0,0 +1,42 @@ +package track + +import ( + "sync" + + "github.com/ElrondNetwork/elrond-go/data" +) + +type blockNotifier struct { + mutNotarizedHeadersHandlers sync.RWMutex + notarizedHeadersHandlers []func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte) +} + +// NewBlockNotifier creates a block notifier object which implements blockNotifierHandler interface +func NewBlockNotifier() (*blockNotifier, error) { + bn := blockNotifier{} + bn.notarizedHeadersHandlers = make([]func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte), 0) + return &bn, nil +} + +func (bn *blockNotifier) callHandlers(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte) { + if len(headers) == 0 { + return + } + + bn.mutNotarizedHeadersHandlers.RLock() + for _, handler := range bn.notarizedHeadersHandlers { + go handler(shardID, headers, headersHashes) + } + bn.mutNotarizedHeadersHandlers.RUnlock() +} + +func (bn *blockNotifier) registerHandler(handler func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte)) { + if handler == nil { + log.Warn("attempt to register a nil handler to a tracker object") + return + } + + bn.mutNotarizedHeadersHandlers.Lock() + bn.notarizedHeadersHandlers = append(bn.notarizedHeadersHandlers, handler) + bn.mutNotarizedHeadersHandlers.Unlock() +} diff --git a/process/track/blockNotifier_test.go b/process/track/blockNotifier_test.go new file mode 100644 index 00000000000..e7706e7a75a --- /dev/null +++ b/process/track/blockNotifier_test.go @@ -0,0 +1,3 @@ +package track_test + +//TODO: Should be 
added units test for tracked blocks notifier diff --git a/process/track/blockProcessor.go b/process/track/blockProcessor.go new file mode 100644 index 00000000000..5b2b4d2afb8 --- /dev/null +++ b/process/track/blockProcessor.go @@ -0,0 +1,318 @@ +package track + +import ( + "sort" + + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type blockProcessor struct { + headerValidator process.HeaderConstructionValidator + requestHandler process.RequestHandler + shardCoordinator sharding.Coordinator + + blockTracker blockTrackerHandler + crossNotarizer blockNotarizerHandler + crossNotarizedHeadersNotifier blockNotifierHandler + selfNotarizedHeadersNotifier blockNotifierHandler + + blockFinality uint64 +} + +// NewBlockProcessor creates a block processor object which implements blockProcessorHandler interface +func NewBlockProcessor( + headerValidator process.HeaderConstructionValidator, + requestHandler process.RequestHandler, + shardCoordinator sharding.Coordinator, + blockTracker blockTrackerHandler, + crossNotarizer blockNotarizerHandler, + crossNotarizedHeadersNotifier blockNotifierHandler, + selfNotarizedHeadersNotifier blockNotifierHandler, +) (*blockProcessor, error) { + + err := checkBlockProcessorNilParameters( + headerValidator, + requestHandler, + shardCoordinator, + blockTracker, + crossNotarizer, + crossNotarizedHeadersNotifier, + selfNotarizedHeadersNotifier, + ) + if err != nil { + return nil, err + } + + bp := blockProcessor{ + headerValidator: headerValidator, + requestHandler: requestHandler, + shardCoordinator: shardCoordinator, + blockTracker: blockTracker, + crossNotarizer: crossNotarizer, + crossNotarizedHeadersNotifier: crossNotarizedHeadersNotifier, + selfNotarizedHeadersNotifier: selfNotarizedHeadersNotifier, + } + + bp.blockFinality = process.BlockFinality + + return &bp, nil +} + +func (bp *blockProcessor) processReceivedHeader(header data.HeaderHandler) { + if check.IfNil(header) { + return + } + + isHeaderForSelfShard := header.GetShardID() == bp.shardCoordinator.SelfId() + if isHeaderForSelfShard { + bp.doJobOnReceivedHeader(header.GetShardID()) + } else { + bp.doJobOnReceivedCrossNotarizedHeader(header.GetShardID()) + } +} + +func (bp *blockProcessor) doJobOnReceivedHeader(shardID uint32) { + _, _, selfNotarizedHeaders, selfNotarizedHeadersHashes := bp.blockTracker.computeLongestSelfChain() + + if len(selfNotarizedHeaders) > 0 { + bp.selfNotarizedHeadersNotifier.callHandlers(shardID, selfNotarizedHeaders, selfNotarizedHeadersHashes) + } +} + +func (bp *blockProcessor) doJobOnReceivedCrossNotarizedHeader(shardID uint32) { + _, _, crossNotarizedHeaders, crossNotarizedHeadersHashes := bp.computeLongestChainFromLastCrossNotarized(shardID) + selfNotarizedHeaders, selfNotarizedHeadersHashes := bp.computeSelfNotarizedHeaders(crossNotarizedHeaders) + + if len(crossNotarizedHeaders) > 0 { + bp.crossNotarizedHeadersNotifier.callHandlers(shardID, crossNotarizedHeaders, crossNotarizedHeadersHashes) + } + + if len(selfNotarizedHeaders) > 0 { + bp.selfNotarizedHeadersNotifier.callHandlers(shardID, selfNotarizedHeaders, selfNotarizedHeadersHashes) + } +} + +func (bp *blockProcessor) computeLongestChainFromLastCrossNotarized( + shardID uint32, +) (data.HeaderHandler, []byte, []data.HeaderHandler, [][]byte) { + + lastCrossNotarizedHeader, lastCrossNotarizedHeaderHash, err := bp.crossNotarizer.getLastNotarizedHeader(shardID) + if err != nil { 
+ return nil, nil, nil, nil + } + + headers, hashes := bp.computeLongestChain(shardID, lastCrossNotarizedHeader) + return lastCrossNotarizedHeader, lastCrossNotarizedHeaderHash, headers, hashes +} + +func (bp *blockProcessor) computeSelfNotarizedHeaders(headers []data.HeaderHandler) ([]data.HeaderHandler, [][]byte) { + selfNotarizedHeadersInfo := make([]*headerInfo, 0) + + for _, header := range headers { + selfHeadersInfo := bp.blockTracker.getSelfHeaders(header) + if len(selfHeadersInfo) > 0 { + selfNotarizedHeadersInfo = append(selfNotarizedHeadersInfo, selfHeadersInfo...) + } + } + + if len(selfNotarizedHeadersInfo) > 1 { + sort.Slice(selfNotarizedHeadersInfo, func(i, j int) bool { + return selfNotarizedHeadersInfo[i].header.GetNonce() < selfNotarizedHeadersInfo[j].header.GetNonce() + }) + } + + selfNotarizedHeaders := make([]data.HeaderHandler, 0) + selfNotarizedHeadersHashes := make([][]byte, 0) + + for _, selfNotarizedHeaderInfo := range selfNotarizedHeadersInfo { + selfNotarizedHeaders = append(selfNotarizedHeaders, selfNotarizedHeaderInfo.header) + selfNotarizedHeadersHashes = append(selfNotarizedHeadersHashes, selfNotarizedHeaderInfo.hash) + } + + return selfNotarizedHeaders, selfNotarizedHeadersHashes +} + +func (bp *blockProcessor) computeLongestChain(shardID uint32, header data.HeaderHandler) ([]data.HeaderHandler, [][]byte) { + headers := make([]data.HeaderHandler, 0) + headersHashes := make([][]byte, 0) + + if check.IfNil(header) { + return headers, headersHashes + } + + sortedHeaders, sortedHeadersHashes := bp.blockTracker.sortHeadersFromNonce(shardID, header.GetNonce()+1) + if len(sortedHeaders) == 0 { + return headers, headersHashes + } + + longestChainHeadersIndexes := make([]int, 0) + headersIndexes := make([]int, 0) + bp.getNextHeader(&longestChainHeadersIndexes, headersIndexes, header, sortedHeaders, 0) + + for _, index := range longestChainHeadersIndexes { + headers = append(headers, sortedHeaders[index]) + headersHashes = append(headersHashes, sortedHeadersHashes[index]) + } + + bp.requestHeadersIfNeeded(header, sortedHeaders, headers) + + return headers, headersHashes +} + +func (bp *blockProcessor) getNextHeader( + longestChainHeadersIndexes *[]int, + headersIndexes []int, + prevHeader data.HeaderHandler, + sortedHeaders []data.HeaderHandler, + index int, +) { + defer func() { + if len(headersIndexes) > len(*longestChainHeadersIndexes) { + *longestChainHeadersIndexes = headersIndexes + } + }() + + if check.IfNil(prevHeader) { + return + } + + for i := index; i < len(sortedHeaders); i++ { + currHeader := sortedHeaders[i] + if currHeader.GetNonce() > prevHeader.GetNonce()+1 { + break + } + + err := bp.headerValidator.IsHeaderConstructionValid(currHeader, prevHeader) + if err != nil { + continue + } + + err = bp.checkHeaderFinality(currHeader, sortedHeaders, i+1) + if err != nil { + continue + } + + headersIndexes = append(headersIndexes, i) + bp.getNextHeader(longestChainHeadersIndexes, headersIndexes, currHeader, sortedHeaders, i+1) + headersIndexes = headersIndexes[:len(headersIndexes)-1] + } +} + +func (bp *blockProcessor) checkHeaderFinality( + header data.HeaderHandler, + sortedHeaders []data.HeaderHandler, + index int, +) error { + + if check.IfNil(header) { + return process.ErrNilBlockHeader + } + + prevHeader := header + numFinalityAttestingHeaders := uint64(0) + + for i := index; i < len(sortedHeaders); i++ { + currHeader := sortedHeaders[i] + if numFinalityAttestingHeaders >= bp.blockFinality || currHeader.GetNonce() > prevHeader.GetNonce()+1 { + break + } 
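The finality counting in this loop can be summarized on plain nonces. Below is a minimal standalone sketch, not part of the patch: the function and variable names are illustrative, the IsHeaderConstructionValid check is ignored, and the attesting nonces are assumed to be sorted ascending.

package main

import "fmt"

// isFinal reports whether headerNonce is followed, without a nonce gap, by at least
// `finality` attesting nonces taken from followingNonces (sorted ascending).
func isFinal(headerNonce uint64, followingNonces []uint64, finality uint64) bool {
	attesting := uint64(0)
	prev := headerNonce
	for _, n := range followingNonces {
		if attesting >= finality || n > prev+1 {
			break
		}
		if n == prev+1 {
			prev = n
			attesting++
		}
	}
	return attesting >= finality
}

func main() {
	fmt.Println(isFinal(10, []uint64{11}, 1))     // true: nonce 11 attests nonce 10
	fmt.Println(isFinal(10, []uint64{12, 13}, 1)) // false: gap at nonce 11, not final
}

The real check additionally requires each attesting header to pass IsHeaderConstructionValid against its predecessor before it is counted.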
+ + err := bp.headerValidator.IsHeaderConstructionValid(currHeader, prevHeader) + if err != nil { + continue + } + + prevHeader = currHeader + numFinalityAttestingHeaders += 1 + } + + if numFinalityAttestingHeaders < bp.blockFinality { + return process.ErrHeaderNotFinal + } + + return nil +} + +func (bp *blockProcessor) requestHeadersIfNeeded( + lastNotarizedHeader data.HeaderHandler, + sortedHeaders []data.HeaderHandler, + longestChainHeaders []data.HeaderHandler, +) { + if check.IfNil(lastNotarizedHeader) { + return + } + + nbSortedHeaders := len(sortedHeaders) + if nbSortedHeaders == 0 { + return + } + + highestNonceReceived := sortedHeaders[nbSortedHeaders-1].GetNonce() + highestNonceInLongestChain := lastNotarizedHeader.GetNonce() + nbLongestChainHeaders := len(longestChainHeaders) + if nbLongestChainHeaders > 0 { + highestNonceInLongestChain = longestChainHeaders[nbLongestChainHeaders-1].GetNonce() + } + + if highestNonceReceived <= highestNonceInLongestChain+bp.blockFinality { + return + } + + log.Debug("requestHeadersIfNeeded", + "shard", lastNotarizedHeader.GetShardID(), + "last notarized nonce", lastNotarizedHeader.GetNonce(), + "highest nonce received", highestNonceReceived, + "highest nonce in longest chain", highestNonceInLongestChain) + + shardID := lastNotarizedHeader.GetShardID() + fromNonce := highestNonceInLongestChain + 1 + toNonce := fromNonce + uint64(bp.blockFinality) + for nonce := fromNonce; nonce <= toNonce; nonce++ { + log.Debug("request header", + "shard", shardID, + "nonce", nonce) + + if shardID == sharding.MetachainShardId { + go bp.requestHandler.RequestMetaHeaderByNonce(nonce) + } else { + go bp.requestHandler.RequestShardHeaderByNonce(shardID, nonce) + } + } +} + +func checkBlockProcessorNilParameters( + headerValidator process.HeaderConstructionValidator, + requestHandler process.RequestHandler, + shardCoordinator sharding.Coordinator, + blockTracker blockTrackerHandler, + crossNotarizer blockNotarizerHandler, + crossNotarizedHeadersNotifier blockNotifierHandler, + selfNotarizedHeadersNotifier blockNotifierHandler, +) error { + if check.IfNil(headerValidator) { + return process.ErrNilHeaderValidator + } + if check.IfNil(requestHandler) { + return process.ErrNilRequestHandler + } + if check.IfNil(shardCoordinator) { + return process.ErrNilShardCoordinator + } + if blockTracker == nil { + return ErrNilBlockTrackerHandler + } + if crossNotarizer == nil { + return ErrNilCrossNotarizer + } + if crossNotarizedHeadersNotifier == nil { + return ErrCrossNotarizedHeadersNotifier + } + if selfNotarizedHeadersNotifier == nil { + return ErrSelfNotarizedHeadersNotifier + } + + return nil +} diff --git a/process/track/blockProcessor_test.go b/process/track/blockProcessor_test.go new file mode 100644 index 00000000000..dfa74728687 --- /dev/null +++ b/process/track/blockProcessor_test.go @@ -0,0 +1,3 @@ +package track_test + +//TODO: Should be added units test for tracked blocks processor diff --git a/process/track/errors.go b/process/track/errors.go new file mode 100644 index 00000000000..7ba9f26135a --- /dev/null +++ b/process/track/errors.go @@ -0,0 +1,20 @@ +package track + +import ( + "github.com/pkg/errors" +) + +// ErrNilBlockTrackerHandler signals that a nil block tracker handler has been provided +var ErrNilBlockTrackerHandler = errors.New("nil block tracker handler") + +// ErrNilCrossNotarizer signals that a nil block notarizer handler has been provided +var ErrNilCrossNotarizer = errors.New("nil cross notarizer") + +// ErrCrossNotarizedHeadersNotifier signals 
that a nil block notifier handler has been provided +var ErrCrossNotarizedHeadersNotifier = errors.New("nil cross notarized header notifier") + +// ErrSelfNotarizedHeadersNotifier signals that a nil block notifier handler has been provided +var ErrSelfNotarizedHeadersNotifier = errors.New("nil self notarized header notifier") + +// ErrNotarizedHeaderOffsetIsOutOfBound signals that a requested offset of the notarized header is out of bound +var ErrNotarizedHeaderOffsetIsOutOfBound = errors.New("requested offset of the notarized header is out of bound") diff --git a/process/track/export_test.go b/process/track/export_test.go new file mode 100644 index 00000000000..4321123879f --- /dev/null +++ b/process/track/export_test.go @@ -0,0 +1 @@ +package track diff --git a/process/track/interface.go b/process/track/interface.go new file mode 100644 index 00000000000..7bf8854f1ef --- /dev/null +++ b/process/track/interface.go @@ -0,0 +1,33 @@ +package track + +import ( + "github.com/ElrondNetwork/elrond-go/data" +) + +type blockNotarizerHandler interface { + addNotarizedHeader(shardID uint32, notarizedHeader data.HeaderHandler, notarizedHeaderHash []byte) + cleanupNotarizedHeadersBehindNonce(shardID uint32, nonce uint64) + displayNotarizedHeaders(shardID uint32, message string) + getLastNotarizedHeader(shardID uint32) (data.HeaderHandler, []byte, error) + getLastNotarizedHeaderNonce(shardID uint32) uint64 + getNotarizedHeader(shardID uint32, offset uint64) (data.HeaderHandler, []byte, error) + initNotarizedHeaders(startHeaders map[uint32]data.HeaderHandler) error + removeLastNotarizedHeader() + restoreNotarizedHeadersToGenesis() +} + +type blockNotifierHandler interface { + callHandlers(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte) + registerHandler(handler func(shardID uint32, headers []data.HeaderHandler, headersHashes [][]byte)) +} + +type blockProcessorHandler interface { + computeLongestChain(shardID uint32, header data.HeaderHandler) ([]data.HeaderHandler, [][]byte) + processReceivedHeader(header data.HeaderHandler) +} + +type blockTrackerHandler interface { + getSelfHeaders(headerHandler data.HeaderHandler) []*headerInfo + computeLongestSelfChain() (data.HeaderHandler, []byte, []data.HeaderHandler, [][]byte) + sortHeadersFromNonce(shardID uint32, nonce uint64) ([]data.HeaderHandler, [][]byte) +} diff --git a/process/track/metaBlockTrack.go b/process/track/metaBlockTrack.go new file mode 100644 index 00000000000..18a5f655437 --- /dev/null +++ b/process/track/metaBlockTrack.go @@ -0,0 +1,123 @@ +package track + +import ( + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/process" +) + +type metaBlockTrack struct { + *baseBlockTrack +} + +// NewMetaBlockTrack creates an object for tracking the received meta blocks +func NewMetaBlockTrack(arguments ArgMetaTracker) (*metaBlockTrack, error) { + err := checkTrackerNilParameters(arguments.ArgBaseTracker) + if err != nil { + return nil, err + } + + if check.IfNil(arguments.PoolsHolder) { + return nil, process.ErrNilPoolsHolder + } + if check.IfNil(arguments.PoolsHolder.Headers()) { + return nil, process.ErrNilHeadersDataPool + } + + crossNotarizer, err := NewBlockNotarizer(arguments.Hasher, arguments.Marshalizer) + if err != nil { + return nil, err + } + + selfNotarizer, err := NewBlockNotarizer(arguments.Hasher, arguments.Marshalizer) + if err != nil { + return nil, err + } + + 
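// The tracker keeps two independent notarized-header lists: crossNotarizer holds
// headers belonging to other shards, while selfNotarizer holds headers of the node's
// own shard. The two notifiers created below broadcast changes of the corresponding
// list to the handlers registered on it.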
crossNotarizedHeadersNotifier, err := NewBlockNotifier() + if err != nil { + return nil, err + } + + selfNotarizedHeadersNotifier, err := NewBlockNotifier() + if err != nil { + return nil, err + } + + bbt := &baseBlockTrack{ + hasher: arguments.Hasher, + headerValidator: arguments.HeaderValidator, + marshalizer: arguments.Marshalizer, + rounder: arguments.Rounder, + shardCoordinator: arguments.ShardCoordinator, + headersPool: arguments.PoolsHolder.Headers(), + store: arguments.Store, + crossNotarizer: crossNotarizer, + selfNotarizer: selfNotarizer, + crossNotarizedHeadersNotifier: crossNotarizedHeadersNotifier, + selfNotarizedHeadersNotifier: selfNotarizedHeadersNotifier, + } + + err = bbt.initNotarizedHeaders(arguments.StartHeaders) + if err != nil { + return nil, err + } + + mbt := metaBlockTrack{ + baseBlockTrack: bbt, + } + + blockProcessor, err := NewBlockProcessor( + arguments.HeaderValidator, + arguments.RequestHandler, + arguments.ShardCoordinator, + &mbt, + crossNotarizer, + crossNotarizedHeadersNotifier, + selfNotarizedHeadersNotifier, + ) + if err != nil { + return nil, err + } + + mbt.blockProcessor = blockProcessor + + mbt.headers = make(map[uint32]map[uint64][]*headerInfo) + mbt.headersPool.RegisterHandler(mbt.receivedHeader) + + return &mbt, nil +} + +func (mbt *metaBlockTrack) getSelfHeaders(headerHandler data.HeaderHandler) []*headerInfo { + selfMetaBlocksInfo := make([]*headerInfo, 0) + + header, ok := headerHandler.(*block.Header) + if !ok { + log.Debug("getSelfHeaders", process.ErrWrongTypeAssertion) + return selfMetaBlocksInfo + } + + for _, metaBlockHash := range header.MetaBlockHashes { + metaBlock, err := process.GetMetaHeader(metaBlockHash, mbt.headersPool, mbt.marshalizer, mbt.store) + if err != nil { + log.Debug("GetMetaHeader", err.Error()) + continue + } + + selfMetaBlocksInfo = append(selfMetaBlocksInfo, &headerInfo{hash: metaBlockHash, header: metaBlock}) + } + + return selfMetaBlocksInfo +} + +func (mbt *metaBlockTrack) computeLongestSelfChain() (data.HeaderHandler, []byte, []data.HeaderHandler, [][]byte) { + lastSelfNotarizedHeader, lastSelfNotarizedHeaderHash, err := mbt.selfNotarizer.getLastNotarizedHeader(mbt.shardCoordinator.SelfId()) + if err != nil { + log.Warn("computeLongestSelfChain.getLastNotarizedHeader", "error", err.Error()) + return nil, nil, nil, nil + } + + headers, hashes := mbt.ComputeLongestChain(mbt.shardCoordinator.SelfId(), lastSelfNotarizedHeader) + return lastSelfNotarizedHeader, lastSelfNotarizedHeaderHash, headers, hashes +} diff --git a/process/track/shardBlockTrack.go b/process/track/shardBlockTrack.go new file mode 100644 index 00000000000..9aae43d8f5e --- /dev/null +++ b/process/track/shardBlockTrack.go @@ -0,0 +1,128 @@ +package track + +import ( + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type shardBlockTrack struct { + *baseBlockTrack +} + +// NewShardBlockTrack creates an object for tracking the received shard blocks +func NewShardBlockTrack(arguments ArgShardTracker) (*shardBlockTrack, error) { + err := checkTrackerNilParameters(arguments.ArgBaseTracker) + if err != nil { + return nil, err + } + + if check.IfNil(arguments.PoolsHolder) { + return nil, process.ErrNilPoolsHolder + } + if check.IfNil(arguments.PoolsHolder.Headers()) { + return nil, process.ErrNilHeadersDataPool + } + + crossNotarizer, err := 
NewBlockNotarizer(arguments.Hasher, arguments.Marshalizer) + if err != nil { + return nil, err + } + + selfNotarizer, err := NewBlockNotarizer(arguments.Hasher, arguments.Marshalizer) + if err != nil { + return nil, err + } + + crossNotarizedHeadersNotifier, err := NewBlockNotifier() + if err != nil { + return nil, err + } + + selfNotarizedHeadersNotifier, err := NewBlockNotifier() + if err != nil { + return nil, err + } + + bbt := &baseBlockTrack{ + hasher: arguments.Hasher, + headerValidator: arguments.HeaderValidator, + marshalizer: arguments.Marshalizer, + rounder: arguments.Rounder, + shardCoordinator: arguments.ShardCoordinator, + headersPool: arguments.PoolsHolder.Headers(), + store: arguments.Store, + crossNotarizer: crossNotarizer, + selfNotarizer: selfNotarizer, + crossNotarizedHeadersNotifier: crossNotarizedHeadersNotifier, + selfNotarizedHeadersNotifier: selfNotarizedHeadersNotifier, + } + + err = bbt.initNotarizedHeaders(arguments.StartHeaders) + if err != nil { + return nil, err + } + + sbt := shardBlockTrack{ + baseBlockTrack: bbt, + } + + blockProcessor, err := NewBlockProcessor( + arguments.HeaderValidator, + arguments.RequestHandler, + arguments.ShardCoordinator, + &sbt, + crossNotarizer, + crossNotarizedHeadersNotifier, + selfNotarizedHeadersNotifier, + ) + if err != nil { + return nil, err + } + + sbt.blockProcessor = blockProcessor + + sbt.headers = make(map[uint32]map[uint64][]*headerInfo) + sbt.headersPool.RegisterHandler(sbt.receivedHeader) + + return &sbt, nil +} + +func (sbt *shardBlockTrack) getSelfHeaders(headerHandler data.HeaderHandler) []*headerInfo { + selfHeadersInfo := make([]*headerInfo, 0) + + metaBlock, ok := headerHandler.(*block.MetaBlock) + if !ok { + log.Debug("getSelfHeaders", process.ErrWrongTypeAssertion) + return selfHeadersInfo + } + + for _, shardInfo := range metaBlock.ShardInfo { + if shardInfo.ShardID != sbt.shardCoordinator.SelfId() { + continue + } + + header, err := process.GetShardHeader(shardInfo.HeaderHash, sbt.headersPool, sbt.marshalizer, sbt.store) + if err != nil { + log.Debug("GetShardHeader", err.Error()) + continue + } + + selfHeadersInfo = append(selfHeadersInfo, &headerInfo{hash: shardInfo.HeaderHash, header: header}) + } + + return selfHeadersInfo +} + +func (sbt *shardBlockTrack) computeLongestSelfChain() (data.HeaderHandler, []byte, []data.HeaderHandler, [][]byte) { + lastSelfNotarizedHeader, lastSelfNotarizedHeaderHash, err := sbt.selfNotarizer.getLastNotarizedHeader(sharding.MetachainShardId) + if err != nil { + log.Warn("computeLongestSelfChain.getLastNotarizedHeader", "error", err.Error()) + return nil, nil, nil, nil + } + + headers, hashes := sbt.ComputeLongestChain(sbt.shardCoordinator.SelfId(), lastSelfNotarizedHeader) + return lastSelfNotarizedHeader, lastSelfNotarizedHeaderHash, headers, hashes +} diff --git a/process/transaction/baseProcess.go b/process/transaction/baseProcess.go index f2f3fab0c00..39740f8bb75 100644 --- a/process/transaction/baseProcess.go +++ b/process/transaction/baseProcess.go @@ -4,6 +4,7 @@ import ( "bytes" "math/big" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/process" @@ -111,7 +112,7 @@ func (txProc *baseTxProcessor) getAddresses( } func (txProc *baseTxProcessor) checkTxValues(tx *transaction.Transaction, acntSnd state.AccountHandler) error { - if acntSnd == nil || acntSnd.IsInterfaceNil() { + if check.IfNil(acntSnd) { // transaction was already 
done at sender shard return nil } diff --git a/process/transaction/interceptedTransaction_test.go b/process/transaction/interceptedTransaction_test.go index 6fc1f35b479..6619678b0e1 100644 --- a/process/transaction/interceptedTransaction_test.go +++ b/process/transaction/interceptedTransaction_test.go @@ -288,7 +288,7 @@ func TestNewInterceptedTransaction_ShouldWork(t *testing.T) { tx := &dataTransaction.Transaction{ Nonce: 1, Value: big.NewInt(2), - Data: "data", + Data: []byte("data"), GasLimit: 3, GasPrice: 4, RcvAddr: recvAddress, @@ -311,7 +311,7 @@ func TestInterceptedTransaction_CheckValidityNilSignatureShouldErr(t *testing.T) tx := &dataTransaction.Transaction{ Nonce: 1, Value: big.NewInt(2), - Data: "data", + Data: []byte("data"), GasLimit: 3, GasPrice: 4, RcvAddr: recvAddress, @@ -331,7 +331,7 @@ func TestInterceptedTransaction_CheckValidityNilRecvAddressShouldErr(t *testing. tx := &dataTransaction.Transaction{ Nonce: 1, Value: big.NewInt(2), - Data: "data", + Data: []byte("data"), GasLimit: 3, GasPrice: 4, RcvAddr: nil, @@ -351,7 +351,7 @@ func TestInterceptedTransaction_CheckValidityNilSenderAddressShouldErr(t *testin tx := &dataTransaction.Transaction{ Nonce: 1, Value: big.NewInt(2), - Data: "data", + Data: []byte("data"), GasLimit: 3, GasPrice: 4, RcvAddr: recvAddress, @@ -371,7 +371,7 @@ func TestInterceptedTransaction_CheckValidityNilValueShouldErr(t *testing.T) { tx := &dataTransaction.Transaction{ Nonce: 1, Value: nil, - Data: "data", + Data: []byte("data"), GasLimit: 3, GasPrice: 4, RcvAddr: recvAddress, @@ -389,7 +389,7 @@ func TestInterceptedTransaction_CheckValidityNilNegativeValueShouldErr(t *testin tx := &dataTransaction.Transaction{ Nonce: 1, Value: big.NewInt(-2), - Data: "data", + Data: []byte("data"), GasLimit: 3, GasPrice: 4, RcvAddr: recvAddress, @@ -411,7 +411,7 @@ func TestNewInterceptedTransaction_InsufficientFeeShouldErr(t *testing.T) { tx := &dataTransaction.Transaction{ Nonce: 1, Value: big.NewInt(2), - Data: "data", + Data: []byte("data"), GasLimit: gasLimit, GasPrice: gasPrice, RcvAddr: recvAddress, @@ -437,7 +437,7 @@ func TestInterceptedTransaction_CheckValidityInvalidSenderShouldErr(t *testing.T tx := &dataTransaction.Transaction{ Nonce: 1, Value: big.NewInt(2), - Data: "data", + Data: []byte("data"), GasLimit: 3, GasPrice: 4, RcvAddr: recvAddress, @@ -457,7 +457,7 @@ func TestInterceptedTransaction_CheckValidityVerifyFailsShouldErr(t *testing.T) tx := &dataTransaction.Transaction{ Nonce: 1, Value: big.NewInt(2), - Data: "data", + Data: []byte("data"), GasLimit: 3, GasPrice: 4, RcvAddr: recvAddress, @@ -477,7 +477,7 @@ func TestInterceptedTransaction_CheckValidityOkValsShouldWork(t *testing.T) { tx := &dataTransaction.Transaction{ Nonce: 1, Value: big.NewInt(2), - Data: "data", + Data: []byte("data"), GasLimit: 3, GasPrice: 4, RcvAddr: recvAddress, @@ -497,7 +497,7 @@ func TestInterceptedTransaction_OkValsGettersShouldWork(t *testing.T) { tx := &dataTransaction.Transaction{ Nonce: 1, Value: big.NewInt(2), - Data: "data", + Data: []byte("data"), GasLimit: 3, GasPrice: 4, RcvAddr: recvAddress, @@ -524,7 +524,7 @@ func TestInterceptedTransaction_ScTxDeployRecvShardIdShouldBeSendersShardId(t *t tx := &dataTransaction.Transaction{ Nonce: 1, Value: big.NewInt(2), - Data: "data", + Data: []byte("data"), GasLimit: 3, GasPrice: 4, RcvAddr: recvAddressDeploy, @@ -575,7 +575,7 @@ func TestInterceptedTransaction_GetNonce(t *testing.T) { tx := &dataTransaction.Transaction{ Nonce: nonce, Value: big.NewInt(2), - Data: "data", + Data: []byte("data"), GasLimit: 3, 
GasPrice: 4, RcvAddr: recvAddress, @@ -595,7 +595,7 @@ func TestInterceptedTransaction_SenderShardId(t *testing.T) { tx := &dataTransaction.Transaction{ Nonce: 0, Value: big.NewInt(2), - Data: "data", + Data: []byte("data"), GasLimit: 3, GasPrice: 4, RcvAddr: recvAddress, @@ -623,7 +623,7 @@ func TestInterceptedTransaction_GetTotalValue(t *testing.T) { tx := &dataTransaction.Transaction{ Nonce: 0, Value: txValue, - Data: "data", + Data: []byte("data"), GasLimit: gasPrice, GasPrice: gasLimit, RcvAddr: recvAddress, @@ -643,7 +643,7 @@ func TestInterceptedTransaction_GetSenderAddress(t *testing.T) { tx := &dataTransaction.Transaction{ Nonce: 0, Value: big.NewInt(2), - Data: "data", + Data: []byte("data"), GasLimit: 3, GasPrice: 4, RcvAddr: recvAddress, diff --git a/process/transaction/shardProcess.go b/process/transaction/shardProcess.go index 5d9b47de2c8..e1b3b8b4ee7 100644 --- a/process/transaction/shardProcess.go +++ b/process/transaction/shardProcess.go @@ -1,8 +1,13 @@ package transaction import ( + "errors" "math/big" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/receipt" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/hashing" @@ -21,6 +26,8 @@ type txProcessor struct { txTypeHandler process.TxTypeHandler shardCoordinator sharding.Coordinator economicsFee process.FeeHandler + receiptForwarder process.IntermediateTransactionHandler + badTxForwarder process.IntermediateTransactionHandler } // NewTxProcessor creates a new txProcessor engine @@ -34,35 +41,43 @@ func NewTxProcessor( txFeeHandler process.TransactionFeeHandler, txTypeHandler process.TxTypeHandler, economicsFee process.FeeHandler, + receiptForwarder process.IntermediateTransactionHandler, + badTxForwarder process.IntermediateTransactionHandler, ) (*txProcessor, error) { - if accounts == nil || accounts.IsInterfaceNil() { + if check.IfNil(accounts) { return nil, process.ErrNilAccountsAdapter } - if hasher == nil || hasher.IsInterfaceNil() { + if check.IfNil(hasher) { return nil, process.ErrNilHasher } - if addressConv == nil || addressConv.IsInterfaceNil() { + if check.IfNil(addressConv) { return nil, process.ErrNilAddressConverter } - if marshalizer == nil || marshalizer.IsInterfaceNil() { + if check.IfNil(marshalizer) { return nil, process.ErrNilMarshalizer } - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + if check.IfNil(shardCoordinator) { return nil, process.ErrNilShardCoordinator } - if scProcessor == nil || scProcessor.IsInterfaceNil() { + if check.IfNil(scProcessor) { return nil, process.ErrNilSmartContractProcessor } - if txFeeHandler == nil || txFeeHandler.IsInterfaceNil() { + if check.IfNil(txFeeHandler) { return nil, process.ErrNilUnsignedTxHandler } - if txTypeHandler == nil || txTypeHandler.IsInterfaceNil() { + if check.IfNil(txTypeHandler) { return nil, process.ErrNilTxTypeHandler } - if economicsFee == nil || economicsFee.IsInterfaceNil() { + if check.IfNil(economicsFee) { return nil, process.ErrNilEconomicsFeeHandler } + if check.IfNil(receiptForwarder) { + return nil, process.ErrNilReceiptHandler + } + if check.IfNil(badTxForwarder) { + return nil, process.ErrNilBadTxHandler + } baseTxProcess := &baseTxProcessor{ accounts: accounts, @@ -71,19 +86,21 @@ func NewTxProcessor( } return &txProcessor{ - baseTxProcessor: baseTxProcess, - hasher: hasher, - marshalizer: 
marshalizer, - scProcessor: scProcessor, - txFeeHandler: txFeeHandler, - txTypeHandler: txTypeHandler, - economicsFee: economicsFee, + baseTxProcessor: baseTxProcess, + hasher: hasher, + marshalizer: marshalizer, + scProcessor: scProcessor, + txFeeHandler: txFeeHandler, + txTypeHandler: txTypeHandler, + economicsFee: economicsFee, + receiptForwarder: receiptForwarder, + badTxForwarder: badTxForwarder, }, nil } // ProcessTransaction modifies the account states in respect with the transaction data func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction) error { - if tx == nil || tx.IsInterfaceNil() { + if check.IfNil(tx) { return process.ErrNilTransaction } @@ -99,6 +116,12 @@ func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction) error err = txProc.checkTxValues(tx, acntSnd) if err != nil { + if errors.Is(err, process.ErrInsufficientFunds) { + receiptErr := txProc.executingFailedTransaction(tx, acntSnd, err) + if receiptErr != nil { + return receiptErr + } + } return err } @@ -119,6 +142,100 @@ func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction) error return process.ErrWrongTransaction } +func (txProc *txProcessor) executingFailedTransaction( + tx *transaction.Transaction, + acntSnd state.AccountHandler, + txError error, +) error { + if check.IfNil(acntSnd) { + return nil + } + + account, ok := acntSnd.(*state.Account) + if !ok { + return process.ErrWrongTypeAssertion + } + + cost := txProc.economicsFee.ComputeFee(tx) + if account.Balance.Cmp(cost) < 0 { + cost.Set(account.Balance) + } + + operation := big.NewInt(0) + err := account.SetBalanceWithJournal(operation.Sub(account.Balance, cost)) + if err != nil { + return err + } + + err = txProc.increaseNonce(account) + if err != nil { + return err + } + + err = txProc.badTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{tx}) + if err != nil { + return err + } + + txHash, err := core.CalculateHash(txProc.marshalizer, txProc.hasher, tx) + if err != nil { + return err + } + + rpt := &receipt.Receipt{ + Value: big.NewInt(0).Set(cost), + SndAddr: tx.SndAddr, + Data: []byte(txError.Error()), + TxHash: txHash, + } + + err = txProc.receiptForwarder.AddIntermediateTransactions([]data.TransactionHandler{rpt}) + if err != nil { + return err + } + + return process.ErrFailedTransaction +} + +func (txProc *txProcessor) createReceiptWithReturnedGas(tx *transaction.Transaction, acntSnd *state.Account) error { + if check.IfNil(acntSnd) { + return nil + } + if core.IsSmartContractAddress(tx.RcvAddr) { + return nil + } + + totalProvided := big.NewInt(0) + totalProvided.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) + + actualCost := txProc.economicsFee.ComputeFee(tx) + refundValue := big.NewInt(0).Sub(totalProvided, actualCost) + + zero := big.NewInt(0) + if refundValue.Cmp(zero) == 0 { + return nil + } + + txHash, err := core.CalculateHash(txProc.marshalizer, txProc.hasher, tx) + if err != nil { + return err + } + + rpt := &receipt.Receipt{ + Value: big.NewInt(0).Set(refundValue), + SndAddr: tx.SndAddr, + Data: []byte("refundedGas"), + TxHash: txHash, + } + + err = txProc.receiptForwarder.AddIntermediateTransactions([]data.TransactionHandler{rpt}) + if err != nil { + return err + } + + return nil +} + func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *state.Account) (*big.Int, error) { if acntSnd == nil { return big.NewInt(0), nil @@ -126,13 +243,14 @@ func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, 
acntSnd *st err := txProc.economicsFee.CheckValidityTxValues(tx) if err != nil { + receiptErr := txProc.executingFailedTransaction(tx, acntSnd, err) + if receiptErr != nil { + return nil, receiptErr + } return nil, err } cost := txProc.economicsFee.ComputeFee(tx) - if acntSnd.Balance.Cmp(cost) < 0 { - return nil, process.ErrInsufficientFunds - } operation := big.NewInt(0) err = acntSnd.SetBalanceWithJournal(operation.Sub(acntSnd.Balance, cost)) @@ -175,6 +293,11 @@ func (txProc *txProcessor) processMoveBalance( } } + err = txProc.createReceiptWithReturnedGas(tx, acntSrc) + if err != nil { + return err + } + txProc.txFeeHandler.ProcessTransactionFee(txFee) return nil diff --git a/process/transaction/shardProcess_test.go b/process/transaction/shardProcess_test.go index f66748dfd30..beabfa10ed1 100644 --- a/process/transaction/shardProcess_test.go +++ b/process/transaction/shardProcess_test.go @@ -66,6 +66,8 @@ func createTxProcessor() txproc.TxProcessor { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) return txProc @@ -86,6 +88,8 @@ func TestNewTxProcessor_NilAccountsShouldErr(t *testing.T) { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -105,6 +109,8 @@ func TestNewTxProcessor_NilHasherShouldErr(t *testing.T) { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilHasher, err) @@ -124,6 +130,8 @@ func TestNewTxProcessor_NilAddressConverterMockShouldErr(t *testing.T) { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilAddressConverter, err) @@ -143,6 +151,8 @@ func TestNewTxProcessor_NilMarshalizerMockShouldErr(t *testing.T) { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -162,6 +172,8 @@ func TestNewTxProcessor_NilShardCoordinatorMockShouldErr(t *testing.T) { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -181,6 +193,8 @@ func TestNewTxProcessor_NilSCProcessorShouldErr(t *testing.T) { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilSmartContractProcessor, err) @@ -200,6 +214,8 @@ func TestNewTxProcessor_NilTxFeeHandlerShouldErr(t *testing.T) { nil, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Equal(t, process.ErrNilUnsignedTxHandler, err) @@ -219,6 +235,8 @@ func TestNewTxProcessor_OkValsShouldWork(t *testing.T) { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) assert.Nil(t, err) @@ -242,6 
+260,8 @@ func TestTxProcessor_GetAddressErrAddressConvShouldErr(t *testing.T) { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) addressConv.Fail = true @@ -284,6 +304,8 @@ func TestTxProcessor_GetAccountsShouldErrNilAddressContainer(t *testing.T) { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) adr1 := mock.NewAddressMock([]byte{65}) @@ -311,6 +333,8 @@ func TestTxProcessor_GetAccountsMalfunctionAccountsShouldErr(t *testing.T) { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) adr1 := mock.NewAddressMock([]byte{65}) @@ -355,6 +379,8 @@ func TestTxProcessor_GetAccountsOkValsSrcShouldWork(t *testing.T) { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) shardCoordinator.ComputeIdCalled = func(container state.AddressContainer) uint32 { @@ -408,6 +434,8 @@ func TestTxProcessor_GetAccountsOkValsDsthouldWork(t *testing.T) { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) shardCoordinator.ComputeIdCalled = func(container state.AddressContainer) uint32 { @@ -446,6 +474,8 @@ func TestTxProcessor_GetAccountsOkValsShouldWork(t *testing.T) { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) a1, a2, err := execTx.GetAccounts(adr1, adr2) @@ -475,6 +505,8 @@ func TestTxProcessor_GetSameAccountShouldWork(t *testing.T) { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) a1, a2, err := execTx.GetAccounts(adr1, adr1) @@ -718,6 +750,8 @@ func TestTxProcessor_ProcessTransactionErrAddressConvShouldErr(t *testing.T) { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) addressConv.Fail = true @@ -741,6 +775,8 @@ func TestTxProcessor_ProcessTransactionMalfunctionAccountsShouldErr(t *testing.T &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) tx := transaction.Transaction{} @@ -780,6 +816,8 @@ func TestTxProcessor_ProcessCheckNotPassShouldErr(t *testing.T) { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) err = execTx.ProcessTransaction(&tx) @@ -834,6 +872,8 @@ func TestTxProcessor_ProcessCheckShouldPassWhenAdrSrcIsNotInNodeShard(t *testing &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) err = execTx.ProcessTransaction(&tx) @@ -880,6 +920,8 @@ func TestTxProcessor_ProcessMoveBalancesShouldWork(t *testing.T) { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + 
&mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) err = execTx.ProcessTransaction(&tx) @@ -936,6 +978,8 @@ func TestTxProcessor_ProcessMoveBalancesShouldPassWhenAdrSrcIsNotInNodeShard(t * &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) err = execTx.ProcessTransaction(&tx) @@ -992,6 +1036,8 @@ func TestTxProcessor_ProcessIncreaseNonceShouldPassWhenAdrSrcIsNotInNodeShard(t &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) err = execTx.ProcessTransaction(&tx) @@ -1042,6 +1088,8 @@ func TestTxProcessor_ProcessOkValsShouldWork(t *testing.T) { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) err = execTx.ProcessTransaction(&tx) @@ -1105,6 +1153,8 @@ func TestTxProcessor_MoveBalanceWithFeesShouldWork(t *testing.T) { &mock.UnsignedTxHandlerMock{}, &mock.TxTypeHandlerMock{}, feeHandler, + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) err = execTx.ProcessTransaction(&tx) @@ -1173,6 +1223,8 @@ func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { }, }, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) err = execTx.ProcessTransaction(&tx) @@ -1234,6 +1286,8 @@ func TestTxProcessor_ProcessTransactionScTxShouldReturnErrWhenExecutionFails(t * return process.SCInvoking, nil }}, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) err = execTx.ProcessTransaction(&tx) @@ -1307,6 +1361,8 @@ func TestTxProcessor_ProcessTransactionScTxShouldNotBeCalledWhenAdrDstIsNotInNod &mock.UnsignedTxHandlerMock{}, computeType, feeHandlerMock(), + &mock.IntermediateTransactionHandlerMock{}, + &mock.IntermediateTransactionHandlerMock{}, ) err = execTx.ProcessTransaction(&tx) diff --git a/process/unsigned/interceptedUnsignedTransaction_test.go b/process/unsigned/interceptedUnsignedTransaction_test.go index f839f629bbd..c7e6e6b8eaa 100644 --- a/process/unsigned/interceptedUnsignedTransaction_test.go +++ b/process/unsigned/interceptedUnsignedTransaction_test.go @@ -174,7 +174,7 @@ func TestNewInterceptedUnsignedTransaction_ShouldWork(t *testing.T) { tx := &smartContractResult.SmartContractResult{ Nonce: 1, Value: big.NewInt(2), - Data: "data", + Data: []byte("data"), RcvAddr: recvAddress, SndAddr: senderAddress, TxHash: []byte("TX"), @@ -193,7 +193,7 @@ func TestInterceptedUnsignedTransaction_CheckValidityNilTxHashShouldErr(t *testi tx := &smartContractResult.SmartContractResult{ Nonce: 1, Value: big.NewInt(2), - Data: "data", + Data: []byte("data"), RcvAddr: recvAddress, SndAddr: senderAddress, TxHash: nil, @@ -211,7 +211,7 @@ func TestInterceptedUnsignedTransaction_CheckValidityNilSenderAddressShouldErr(t tx := &smartContractResult.SmartContractResult{ Nonce: 1, Value: big.NewInt(2), - Data: "data", + Data: []byte("data"), RcvAddr: recvAddress, SndAddr: nil, TxHash: []byte("TX"), @@ -229,7 +229,7 @@ func TestInterceptedUnsignedTransaction_CheckValidityNilRecvAddressShouldErr(t * tx := &smartContractResult.SmartContractResult{ Nonce: 1, Value: big.NewInt(2), - Data: "data", + Data: []byte("data"), RcvAddr: nil, SndAddr: senderAddress, TxHash: 
[]byte("TX"), @@ -247,7 +247,7 @@ func TestInterceptedUnsignedTransaction_CheckValidityNilValueShouldErr(t *testin tx := &smartContractResult.SmartContractResult{ Nonce: 1, Value: nil, - Data: "data", + Data: []byte("data"), RcvAddr: recvAddress, SndAddr: senderAddress, TxHash: []byte("TX"), @@ -265,7 +265,7 @@ func TestInterceptedUnsignedTransaction_CheckValidityNilNegativeValueShouldErr(t tx := &smartContractResult.SmartContractResult{ Nonce: 1, Value: big.NewInt(-2), - Data: "data", + Data: []byte("data"), RcvAddr: recvAddress, SndAddr: senderAddress, TxHash: []byte("TX"), @@ -283,7 +283,7 @@ func TestInterceptedUnsignedTransaction_CheckValidityInvalidSenderShouldErr(t *t tx := &smartContractResult.SmartContractResult{ Nonce: 1, Value: big.NewInt(2), - Data: "data", + Data: []byte("data"), RcvAddr: recvAddress, SndAddr: []byte(""), TxHash: []byte("TX"), @@ -301,7 +301,7 @@ func TestInterceptedUnsignedTransaction_CheckValidityShouldWork(t *testing.T) { tx := &smartContractResult.SmartContractResult{ Nonce: 1, Value: big.NewInt(2), - Data: "data", + Data: []byte("data"), RcvAddr: recvAddress, SndAddr: senderAddress, TxHash: []byte("TX"), @@ -323,7 +323,7 @@ func TestInterceptedUnsignedTransaction_OkValsGettersShouldWork(t *testing.T) { tx := &smartContractResult.SmartContractResult{ Nonce: nonce, Value: value, - Data: "data", + Data: []byte("data"), RcvAddr: recvAddress, SndAddr: senderAddress, TxHash: []byte("TX"), diff --git a/scripts/testnet/config.sh b/scripts/testnet/config.sh new file mode 100755 index 00000000000..669775b05bf --- /dev/null +++ b/scripts/testnet/config.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +export ELRONDTESTNETSCRIPTSDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +source "$ELRONDTESTNETSCRIPTSDIR/variables.sh" +source "$ELRONDTESTNETSCRIPTSDIR/include/config.sh" +source "$ELRONDTESTNETSCRIPTSDIR/include/build.sh" + +prepareFolders + +buildConfigGenerator + +generateConfig + +copyConfig + +copySeednodeConfig +updateSeednodeConfig + +copyNodeConfig +updateNodeConfig + +if [ $PRIVATE_REPOS -eq 1 ]; then + prepareFolders_PrivateRepos + + copyProxyConfig + updateProxyConfig + copyTxGenConfig + updateTxGenConfig +fi diff --git a/scripts/testnet/include/build.sh b/scripts/testnet/include/build.sh index 605e77a2bed..5f0dcbe4a6f 100644 --- a/scripts/testnet/include/build.sh +++ b/scripts/testnet/include/build.sh @@ -9,10 +9,6 @@ prepareFolders() { [ -d seednode ] || mkdir -p seednode [ -d seednode/config ] || mkdir -p seednode/config [ -d node_working_dirs ] || mkdir -p node_working_dirs - [ -d proxy ] || mkdir -p proxy - [ -d ./proxy/config ] || mkdir -p ./proxy/config - [ -d txgen ] || mkdir -p txgen - [ -d ./txgen/config ] || mkdir -p ./txgen/config } prepareFolders_PrivateRepos() { @@ -31,7 +27,7 @@ buildConfigGenerator() { popd pushd $TESTNETDIR - cp $CONFIGGENERATOR ./filegen/ + mv $CONFIGGENERATOR ./filegen/ echo "Configuration Generator built..." popd } @@ -40,11 +36,11 @@ buildConfigGenerator() { buildNode() { echo "Building Node executable..." pushd $NODEDIR - go build . + go build -gcflags="all=-N -l" . popd pushd $TESTNETDIR - cp $NODE ./node/ + mv $NODE ./node/ echo "Node executable built." popd } @@ -56,7 +52,7 @@ buildSeednode() { popd pushd $TESTNETDIR - cp $SEEDNODE ./seednode/ + mv $SEEDNODE ./seednode/ echo "Seednode executable built." popd } @@ -68,7 +64,7 @@ buildProxy() { popd pushd $TESTNETDIR - cp $PROXY ./proxy/ + mv $PROXY ./proxy/ echo "Proxy executable built." 
popd } @@ -80,7 +76,7 @@ buildTxGen() { popd pushd $TESTNETDIR - cp $TXGEN ./txgen/ + mv $TXGEN ./txgen/ echo "TxGen executable built." popd } diff --git a/scripts/testnet/include/nodes.sh b/scripts/testnet/include/nodes.sh index 15135803724..0d5b16149af 100644 --- a/scripts/testnet/include/nodes.sh +++ b/scripts/testnet/include/nodes.sh @@ -121,7 +121,7 @@ assembleCommand_startObserverNode() { local nodeCommand="nice -n $NODE_NICENESS ./node \ -port $PORT -rest-api-interface localhost:$RESTAPIPORT \ -tx-sign-sk-index $KEY_INDEX -sk-index $KEY_INDEX \ - -num-of-nodes $TOTAL_NODECOUNT -storage-cleanup -destination-shard-as-observer $SHARD \ + -num-of-nodes $TOTAL_NODECOUNT -destination-shard-as-observer $SHARD \ -working-directory $WORKING_DIR" if [ $NODETERMUI -eq 0 ] @@ -145,7 +145,7 @@ assembleCommand_startValidatorNode() { local nodeCommand="nice -n $NODE_NICENESS ./node \ -port $PORT -rest-api-interface localhost:$RESTAPIPORT \ -tx-sign-sk-index $KEY_INDEX -sk-index $KEY_INDEX \ - -num-of-nodes $TOTAL_NODECOUNT -storage-cleanup \ + -num-of-nodes $TOTAL_NODECOUNT \ -working-directory $WORKING_DIR" if [ $NODETERMUI -eq 0 ] diff --git a/scripts/testnet/start.sh b/scripts/testnet/start.sh index 034c9be09cb..e8a8f04635f 100755 --- a/scripts/testnet/start.sh +++ b/scripts/testnet/start.sh @@ -14,13 +14,11 @@ source "$ELRONDTESTNETSCRIPTSDIR/include/tools.sh" prepareFolders # Phase 1: build Seednode and Node executables -buildConfigGenerator buildSeednode buildNode # Phase 2: generate configuration -generateConfig copyConfig copySeednodeConfig @@ -29,6 +27,7 @@ updateSeednodeConfig copyNodeConfig updateNodeConfig + # Phase 3: start the Seednode startSeednode showTerminalSession "elrond-tools" @@ -47,10 +46,6 @@ if [ $PRIVATE_REPOS -eq 1 ]; then prepareFolders_PrivateRepos buildProxy buildTxGen - copyProxyConfig - updateProxyConfig - copyTxGenConfig - updateTxGenConfig # Phase 6: start the Proxy startProxy diff --git a/sharding/errors.go b/sharding/errors.go index 71820dfa26a..b7183b605b3 100644 --- a/sharding/errors.go +++ b/sharding/errors.go @@ -16,6 +16,12 @@ var ErrShardIdOutOfRange = errors.New("shard id out of range") // ErrNilPubKey signals that the public key is nil var ErrNilPubKey = errors.New("nil public key") +// ErrNilNodesCoordinator signals that the nodesCoordinator is nil +var ErrNilNodesCoordinator = errors.New("nil nodesCoordinator") + +// ErrNilRater signals that the rater is nil +var ErrNilRater = errors.New("nil rater") + // ErrNoPubKeys signals an error when public keys are missing var ErrNoPubKeys = errors.New("no public keys defined") @@ -81,3 +87,6 @@ var ErrNilAddress = errors.New("nil address") // ErrValidatorNotFound signals that the validator has not been found var ErrValidatorNotFound = errors.New("validator not found") + +// ErrNotImplemented signals a call of a non implemented functionality +var ErrNotImplemented = errors.New("feature not implemented") diff --git a/sharding/export_test.go b/sharding/export_test.go index cf6427cb891..39d7282b1ad 100644 --- a/sharding/export_test.go +++ b/sharding/export_test.go @@ -35,3 +35,7 @@ func CommunicationIdentifierBetweenShards(shardId1 uint32, shardId2 uint32) stri func (ihgs *indexHashedNodesCoordinator) EligibleList() []Validator { return ihgs.nodesMap[ihgs.shardId] } + +func (ihgs *indexHashedNodesCoordinatorWithRater) ExpandEligibleList(shardId uint32) []Validator { + return ihgs.expandEligibleList(shardId) +} diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go index 
9b4f5c76d15..36195708e6a 100644 --- a/sharding/indexHashedNodesCoordinator.go +++ b/sharding/indexHashedNodesCoordinator.go @@ -4,13 +4,13 @@ import ( "bytes" "encoding/binary" "fmt" - "math/big" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/hashing" ) type indexHashedNodesCoordinator struct { + doExpandEligibleList func(uint32) []Validator nbShards uint32 shardId uint32 hasher hashing.Hasher @@ -37,6 +37,8 @@ func NewIndexHashedNodesCoordinator(arguments ArgNodesCoordinator) (*indexHashed selfPubKey: arguments.SelfPublicKey, } + ihgs.doExpandEligibleList = ihgs.expandEligibleList + err = ihgs.SetNodesPerShards(arguments.Nodes) if err != nil { return nil, err @@ -88,6 +90,11 @@ func (ihgs *indexHashedNodesCoordinator) SetNodesPerShards(nodes map[uint32][]Va return nil } +// GetNodesPerShard returns the nodes per shard map +func (ihgs *indexHashedNodesCoordinator) GetNodesPerShard() map[uint32][]Validator { + return ihgs.nodesMap +} + // ComputeValidatorsGroup will generate a list of validators based on the the eligible list, // consensus group size and a randomness source // Steps: @@ -119,7 +126,8 @@ func (ihgs *indexHashedNodesCoordinator) ComputeValidatorsGroup( randomness = []byte(fmt.Sprintf("%d-%s", round, core.ToB64(randomness))) // TODO: pre-compute eligible list and update only on rating change. - expandedList := ihgs.expandEligibleList(shardId) + expandedList := ihgs.doExpandEligibleList(shardId) + lenExpandedList := len(expandedList) for startIdx := 0; startIdx < consensusSize; startIdx++ { @@ -271,12 +279,10 @@ func (ihgs *indexHashedNodesCoordinator) computeListIndex(currentIndex int, lenL indexHash := ihgs.hasher.Compute(string(buffCurrentIndex) + randomSource) - computedLargeIndex := big.NewInt(0) - computedLargeIndex.SetBytes(indexHash) - lenExpandedEligibleList := big.NewInt(int64(lenList)) + computedLargeIndex := binary.BigEndian.Uint64(indexHash) + lenExpandedEligibleList := uint64(lenList) - // computedListIndex = computedLargeIndex % len(expandedEligibleList) - computedListIndex := big.NewInt(0).Mod(computedLargeIndex, lenExpandedEligibleList).Int64() + computedListIndex := computedLargeIndex % lenExpandedEligibleList return int(computedListIndex) } diff --git a/sharding/indexHashedNodesCoordinatorWithRater.go b/sharding/indexHashedNodesCoordinatorWithRater.go new file mode 100644 index 00000000000..e1e618ea984 --- /dev/null +++ b/sharding/indexHashedNodesCoordinatorWithRater.go @@ -0,0 +1,51 @@ +package sharding + +import ( + "github.com/ElrondNetwork/elrond-go/core/check" +) + +type indexHashedNodesCoordinatorWithRater struct { + *indexHashedNodesCoordinator + RatingReader +} + +// NewIndexHashedNodesCoordinatorWithRater creates a new index hashed group selector with rater +func NewIndexHashedNodesCoordinatorWithRater( + indexNodesCoordinator *indexHashedNodesCoordinator, + rater RatingReader, +) (*indexHashedNodesCoordinatorWithRater, error) { + if check.IfNil(indexNodesCoordinator) { + return nil, ErrNilNodesCoordinator + } + if check.IfNil(rater) { + return nil, ErrNilRater + } + + ihncr := &indexHashedNodesCoordinatorWithRater{ + indexHashedNodesCoordinator: indexNodesCoordinator, + RatingReader: rater, + } + + indexNodesCoordinator.doExpandEligibleList = ihncr.expandEligibleList + + return ihncr, nil +} + +func (ihgs *indexHashedNodesCoordinatorWithRater) expandEligibleList(shardId uint32) []Validator { + validatorList := make([]Validator, 0) + + for _, validator := range ihgs.nodesMap[shardId] { + pk := validator.PubKey() + rating :=
ihgs.GetRating(string(pk)) + for i := uint32(0); i < rating; i++ { + validatorList = append(validatorList, validator) + } + } + + return validatorList +} + +//IsInterfaceNil verifies that the underlying value is nil +func (ihgs *indexHashedNodesCoordinatorWithRater) IsInterfaceNil() bool { + return ihgs == nil +} diff --git a/sharding/indexHashedNodesCoordinatorWithRater_test.go b/sharding/indexHashedNodesCoordinatorWithRater_test.go new file mode 100644 index 00000000000..a1ffb0e86bc --- /dev/null +++ b/sharding/indexHashedNodesCoordinatorWithRater_test.go @@ -0,0 +1,418 @@ +package sharding_test + +import ( + "fmt" + "math/big" + "math/rand" + "runtime" + "strconv" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/hashing/blake2b" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/sharding/mock" + "github.com/stretchr/testify/assert" +) + +func createArguments() sharding.ArgNodesCoordinator { + nodesMap := createDummyNodesMap() + arguments := sharding.ArgNodesCoordinator{ + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Hasher: &mock.HasherMock{}, + NbShards: 1, + Nodes: nodesMap, + SelfPublicKey: []byte("test"), + } + return arguments +} + +func TestNewIndexHashedNodesCoordinatorWithRater_NilRaterShouldErr(t *testing.T) { + nc, _ := sharding.NewIndexHashedNodesCoordinator(createArguments()) + ihgs, err := sharding.NewIndexHashedNodesCoordinatorWithRater(nc, nil) + + assert.Nil(t, ihgs) + assert.Equal(t, sharding.ErrNilRater, err) +} + +func TestNewIndexHashedNodesCoordinatorWithRater_NilNodesCoordinatorShouldErr(t *testing.T) { + ihgs, err := sharding.NewIndexHashedNodesCoordinatorWithRater(nil, &mock.RaterMock{}) + + assert.Nil(t, ihgs) + assert.Equal(t, sharding.ErrNilNodesCoordinator, err) +} + +func TestNewIndexHashedGroupSelectorWithRater_OkValsShouldWork(t *testing.T) { + t.Parallel() + + nc, _ := sharding.NewIndexHashedNodesCoordinator(createArguments()) + ihgs, err := sharding.NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) + assert.NotNil(t, ihgs) + assert.Nil(t, err) +} + +//------- LoadEligibleList + +func TestIndexHashedGroupSelectorWithRater_SetNilNodesMapShouldErr(t *testing.T) { + t.Parallel() + + nc, _ := sharding.NewIndexHashedNodesCoordinator(createArguments()) + ihgs, _ := sharding.NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) + assert.Equal(t, sharding.ErrNilInputNodesMap, ihgs.SetNodesPerShards(nil)) +} + +func TestIndexHashedGroupSelectorWithRater_OkValShouldWork(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + arguments := sharding.ArgNodesCoordinator{ + ShardConsensusGroupSize: 2, + MetaConsensusGroupSize: 1, + Hasher: &mock.HasherMock{}, + NbShards: 1, + Nodes: nodesMap, + SelfPublicKey: []byte("key"), + } + nc, _ := sharding.NewIndexHashedNodesCoordinator(arguments) + ihgs, err := sharding.NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) + assert.Nil(t, err) + assert.Equal(t, nodesMap[0], ihgs.EligibleList()) +} + +//------- functionality tests + +func TestIndexHashedGroupSelectorWithRater_ComputeValidatorsGroup1ValidatorShouldCallGetRating(t *testing.T) { + t.Parallel() + + list := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0"), []byte("addr0")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + arguments := sharding.ArgNodesCoordinator{ + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Hasher: &mock.HasherMock{}, + NbShards: 1, + Nodes: nodesMap, + SelfPublicKey: 
[]byte("key"), + } + raterCalled := false + rater := &mock.RaterMock{GetRatingCalled: func(string) uint32 { + raterCalled = true + return 1 + }} + + nc, _ := sharding.NewIndexHashedNodesCoordinator(arguments) + ihgs, _ := sharding.NewIndexHashedNodesCoordinatorWithRater(nc, rater) + list2, err := ihgs.ComputeValidatorsGroup([]byte("randomness"), 0, 0) + + assert.Nil(t, err) + assert.Equal(t, 1, len(list2)) + assert.Equal(t, true, raterCalled) +} + +func TestIndexHashedGroupSelectorWithRater_ComputeExpandedList(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + arguments := sharding.ArgNodesCoordinator{ + ShardConsensusGroupSize: 2, + MetaConsensusGroupSize: 1, + Hasher: &mock.HasherMock{}, + NbShards: 1, + Nodes: nodesMap, + SelfPublicKey: []byte("key"), + } + + ratingPk0 := uint32(5) + ratingPk1 := uint32(1) + rater := &mock.RaterMock{GetRatingCalled: func(pk string) uint32 { + if pk == "pk0" { + return ratingPk0 + } + if pk == "pk1" { + return ratingPk1 + } + return 1 + }} + + nc, _ := sharding.NewIndexHashedNodesCoordinator(arguments) + ihgs, _ := sharding.NewIndexHashedNodesCoordinatorWithRater(nc, rater) + expandedList := ihgs.ExpandEligibleList(0) + assert.Equal(t, int(ratingPk0+ratingPk1), len(expandedList)) + + occurences := make(map[string]uint32, 2) + occurences["pk0"] = 0 + occurences["pk1"] = 0 + for _, validator := range expandedList { + occurences[string(validator.PubKey())]++ + } + + assert.Equal(t, ratingPk0, occurences["pk0"]) + assert.Equal(t, ratingPk1, occurences["pk1"]) +} + +func BenchmarkIndexHashedGroupSelectorWithRater_ComputeValidatorsGroup21of400(b *testing.B) { + consensusGroupSize := 21 + list := make([]sharding.Validator, 0) + + //generate 400 validators + for i := 0; i < 400; i++ { + list = append(list, mock.NewValidatorMock(big.NewInt(0), 0, []byte("pk"+strconv.Itoa(i)), []byte("addr"+strconv.Itoa(i)))) + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + + arguments := sharding.ArgNodesCoordinator{ + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Hasher: &mock.HasherMock{}, + NbShards: 1, + Nodes: nodesMap, + SelfPublicKey: []byte("key"), + } + ihgs, _ := sharding.NewIndexHashedNodesCoordinator(arguments) + ihgsRater, _ := sharding.NewIndexHashedNodesCoordinatorWithRater(ihgs, &mock.RaterMock{}) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + randomness := strconv.Itoa(i) + list2, _ := ihgsRater.ComputeValidatorsGroup([]byte(randomness), 0, 0) + + assert.Equal(b, consensusGroupSize, len(list2)) + } +} + +func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldReturnErrNilPubKey(t *testing.T) { + t.Parallel() + + list := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0"), []byte("addr0")), + } + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + arguments := sharding.ArgNodesCoordinator{ + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Hasher: &mock.HasherMock{}, + NbShards: 1, + Nodes: nodesMap, + SelfPublicKey: []byte("key"), + } + nc, _ := sharding.NewIndexHashedNodesCoordinator(arguments) + ihgs, _ := sharding.NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) + + _, _, err := ihgs.GetValidatorWithPublicKey(nil) + assert.Equal(t, sharding.ErrNilPubKey, err) +} + +func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldReturnErrValidatorNotFound(t *testing.T) { + t.Parallel() + + list := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0"), 
[]byte("addr0")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + arguments := sharding.ArgNodesCoordinator{ + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Hasher: &mock.HasherMock{}, + NbShards: 1, + Nodes: nodesMap, + SelfPublicKey: []byte("key"), + } + nc, _ := sharding.NewIndexHashedNodesCoordinator(arguments) + ihgs, _ := sharding.NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) + + _, _, err := ihgs.GetValidatorWithPublicKey([]byte("pk1")) + assert.Equal(t, sharding.ErrValidatorNotFound, err) +} + +func TestIndexHashedGroupSelectorWithRater_GetValidatorWithPublicKeyShouldWork(t *testing.T) { + t.Parallel() + + listMeta := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0_meta"), []byte("addr0_meta")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk1_meta"), []byte("addr1_meta")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk2_meta"), []byte("addr2_meta")), + } + listShard0 := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0_shard0"), []byte("addr0_shard0")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk1_shard0"), []byte("addr1_shard0")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk2_shard0"), []byte("addr2_shard0")), + } + listShard1 := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0_shard1"), []byte("addr0_shard1")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk1_shard1"), []byte("addr1_shard1")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk2_shard1"), []byte("addr2_shard1")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[sharding.MetachainShardId] = listMeta + nodesMap[0] = listShard0 + nodesMap[1] = listShard1 + + arguments := sharding.ArgNodesCoordinator{ + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Hasher: &mock.HasherMock{}, + NbShards: 2, + Nodes: nodesMap, + SelfPublicKey: []byte("key"), + } + nc, _ := sharding.NewIndexHashedNodesCoordinator(arguments) + ihgs, _ := sharding.NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) + + validator, shardId, err := ihgs.GetValidatorWithPublicKey([]byte("pk0_meta")) + assert.Nil(t, err) + assert.Equal(t, sharding.MetachainShardId, shardId) + assert.Equal(t, []byte("addr0_meta"), validator.Address()) + + validator, shardId, err = ihgs.GetValidatorWithPublicKey([]byte("pk1_shard0")) + assert.Nil(t, err) + assert.Equal(t, uint32(0), shardId) + assert.Equal(t, []byte("addr1_shard0"), validator.Address()) + + validator, shardId, err = ihgs.GetValidatorWithPublicKey([]byte("pk2_shard1")) + assert.Nil(t, err) + assert.Equal(t, uint32(1), shardId) + assert.Equal(t, []byte("addr2_shard1"), validator.Address()) +} + +func TestIndexHashedGroupSelectorWithRater_GetAllValidatorsPublicKeys(t *testing.T) { + t.Parallel() + + shardZeroId := uint32(0) + shardOneId := uint32(1) + expectedValidatorsPubKeys := map[uint32][][]byte{ + shardZeroId: {[]byte("pk0_shard0"), []byte("pk1_shard0"), []byte("pk2_shard0")}, + shardOneId: {[]byte("pk0_shard1"), []byte("pk1_shard1"), []byte("pk2_shard1")}, + sharding.MetachainShardId: {[]byte("pk0_meta"), []byte("pk1_meta"), []byte("pk2_meta")}, + } + + listMeta := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[sharding.MetachainShardId][0], []byte("addr0_meta")), + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[sharding.MetachainShardId][1], []byte("addr1_meta")), + mock.NewValidatorMock(big.NewInt(1), 2, 
expectedValidatorsPubKeys[sharding.MetachainShardId][2], []byte("addr2_meta")), + } + listShard0 := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[shardZeroId][0], []byte("addr0_shard0")), + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[shardZeroId][1], []byte("addr1_shard0")), + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[shardZeroId][2], []byte("addr2_shard0")), + } + listShard1 := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[shardOneId][0], []byte("addr0_shard1")), + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[shardOneId][1], []byte("addr1_shard1")), + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[shardOneId][2], []byte("addr2_shard1")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[sharding.MetachainShardId] = listMeta + nodesMap[shardZeroId] = listShard0 + nodesMap[shardOneId] = listShard1 + + arguments := sharding.ArgNodesCoordinator{ + ShardConsensusGroupSize: 1, + MetaConsensusGroupSize: 1, + Hasher: &mock.HasherMock{}, + ShardId: shardZeroId, + NbShards: 2, + Nodes: nodesMap, + SelfPublicKey: []byte("key"), + } + + nc, _ := sharding.NewIndexHashedNodesCoordinator(arguments) + ihgs, _ := sharding.NewIndexHashedNodesCoordinatorWithRater(nc, &mock.RaterMock{}) + + allValidatorsPublicKeys := ihgs.GetAllValidatorsPublicKeys() + assert.Equal(t, expectedValidatorsPubKeys, allValidatorsPublicKeys) +} + +func BenchmarkIndexHashedGroupSelectorWithRater_TestExpandList(b *testing.B) { + m := runtime.MemStats{} + runtime.ReadMemStats(&m) + + fmt.Println(m.TotalAlloc) + + nrNodes := 40000 + ratingSteps := 100 + array := make([]int, nrNodes*ratingSteps) + for i := 0; i < nrNodes; i++ { + for j := 0; j < ratingSteps; j++ { + array[i*ratingSteps+j] = i + } + } + + //a := []int{1, 2, 3, 4, 5, 6, 7, 8} + rand.Seed(time.Now().UnixNano()) + rand.Shuffle(len(array), func(i, j int) { array[i], array[j] = array[j], array[i] }) + m2 := runtime.MemStats{} + + runtime.ReadMemStats(&m2) + + fmt.Println(m2.TotalAlloc) + fmt.Println(fmt.Sprintf("Used %d MB", (m2.TotalAlloc-m.TotalAlloc)/1024/1024)) + //fmt.Print(array[0:100]) +} + +func BenchmarkIndexHashedGroupSelectorWithRater_TestHashes(b *testing.B) { + nrElementsInList := int64(4000000) + nrHashes := 100 + + hasher := blake2b.Blake2b{} + + randomBits := "" + + for i := 0; i < nrHashes; i++ { + randomBits = fmt.Sprintf("%s%d", randomBits, rand.Intn(2)) + } + //computedListIndex := int64(0) + for i := 0; i < nrHashes; i++ { + computedHash := hasher.Compute(randomBits + fmt.Sprintf("%d", i)) + computedLargeIndex := big.NewInt(0) + computedLargeIndex.SetBytes(computedHash) + fmt.Println(big.NewInt(0).Mod(computedLargeIndex, big.NewInt(nrElementsInList)).Int64()) + } + + //fmt.Print(array[0:100]) +} + +func BenchmarkIndexHashedWithRaterGroupSelector_ComputeValidatorsGroup21of400(b *testing.B) { + consensusGroupSize := 21 + list := make([]sharding.Validator, 0) + + //generate 400 validators + for i := 0; i < 400; i++ { + list = append(list, mock.NewValidatorMock(big.NewInt(0), 0, []byte("pk"+strconv.Itoa(i)), []byte("addr"+strconv.Itoa(i)))) + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + + arguments := sharding.ArgNodesCoordinator{ + ShardConsensusGroupSize: consensusGroupSize, + MetaConsensusGroupSize: 1, + Hasher: &mock.HasherMock{}, + NbShards: 1, + Nodes: nodesMap, + SelfPublicKey: []byte("key"), + } + ihgs, _ := 
sharding.NewIndexHashedNodesCoordinator(arguments) + ihgsRater, _ := sharding.NewIndexHashedNodesCoordinatorWithRater(ihgs, &mock.RaterMock{}) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + randomness := strconv.Itoa(i) + list2, _ := ihgsRater.ComputeValidatorsGroup([]byte(randomness), 0, 0) + + assert.Equal(b, consensusGroupSize, len(list2)) + } +} diff --git a/sharding/indexHashedNodesCoordinator_test.go b/sharding/indexHashedNodesCoordinator_test.go index 22d18cde6f3..3ea9a8b5be8 100644 --- a/sharding/indexHashedNodesCoordinator_test.go +++ b/sharding/indexHashedNodesCoordinator_test.go @@ -5,6 +5,7 @@ import ( "fmt" "math/big" "strconv" + "strings" "testing" "github.com/ElrondNetwork/elrond-go/core" @@ -13,14 +14,10 @@ import ( "github.com/stretchr/testify/assert" ) -func convertBigIntToBytes(value *big.Int) []byte { - return value.Bytes() -} - func uint64ToBytes(value uint64) []byte { buff := make([]byte, 8) - binary.BigEndian.PutUint64(buff, value) + return buff } @@ -299,12 +296,12 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2Validators(t *testi //element 0 will be first element //element 1 will be the second hasher.ComputeCalled = func(s string) []byte { - if string(uint64ToBytes(0))+randomness == s { - return convertBigIntToBytes(big.NewInt(0)) + if strings.Contains(s, "0-") { + return uint64ToBytes(0) } - if string(uint64ToBytes(1))+randomness == s { - return convertBigIntToBytes(big.NewInt(1)) + if strings.Contains(s, "1-") { + return uint64ToBytes(1) } return nil @@ -340,11 +337,11 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsRevertOrd //element 1 will be the first hasher.ComputeCalled = func(s string) []byte { if string(uint64ToBytes(0))+randSource == s { - return convertBigIntToBytes(big.NewInt(1)) + return uint64ToBytes(1) } if string(uint64ToBytes(1))+randSource == s { - return convertBigIntToBytes(big.NewInt(0)) + return uint64ToBytes(0) } return nil @@ -390,12 +387,12 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsSameIndex //element 0 will be the first //element 1 will be the second as the same index is being returned and 0 is already in list hasher.ComputeCalled = func(s string) []byte { - if string(uint64ToBytes(0))+randomness == s { - return convertBigIntToBytes(big.NewInt(0)) + if strings.Contains(s, "0-") { + return uint64ToBytes(0) } - if string(uint64ToBytes(1))+randomness == s { - return convertBigIntToBytes(big.NewInt(0)) + if strings.Contains(s, "1-") { + return uint64ToBytes(1) } return nil @@ -434,23 +431,22 @@ func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsSho // for index 4, hasher will return 0 which will translate to 0, 0 is already picked, 1 is already picked, 2 is already picked, // 3 is the 4-th element // for index 5, hasher will return 9 which will translate to 9, so 9, 0, 1, 2, 3 are already picked, 4 is the 5-th element - script := make(map[string]*big.Int) + script := make(map[string]uint64) - script[string(uint64ToBytes(0))+randomnessWithRound] = big.NewInt(11) //will translate to 1, add 1 - script[string(uint64ToBytes(1))+randomnessWithRound] = big.NewInt(1) //will translate to 1, add 2 - script[string(uint64ToBytes(2))+randomnessWithRound] = big.NewInt(9) //will translate to 9, add 9 - script[string(uint64ToBytes(3))+randomnessWithRound] = big.NewInt(9) //will translate to 9, add 0 - script[string(uint64ToBytes(4))+randomnessWithRound] = big.NewInt(0) //will translate to 0, add 3 - script[string(uint64ToBytes(5))+randomnessWithRound] = 
big.NewInt(9) //will translate to 9, add 4 + script[string(uint64ToBytes(0))+randomnessWithRound] = 11 //will translate to 1, add 1 + script[string(uint64ToBytes(1))+randomnessWithRound] = 1 //will translate to 1, add 2 + script[string(uint64ToBytes(2))+randomnessWithRound] = 9 //will translate to 9, add 9 + script[string(uint64ToBytes(3))+randomnessWithRound] = 9 //will translate to 9, add 0 + script[string(uint64ToBytes(4))+randomnessWithRound] = 0 //will translate to 0, add 3 + script[string(uint64ToBytes(5))+randomnessWithRound] = 9 //will translate to 9, add 4 hasher.ComputeCalled = func(s string) []byte { val, ok := script[s] - if !ok { assert.Fail(t, "should have not got here") } - return convertBigIntToBytes(val) + return uint64ToBytes(val) } validator0 := mock.NewValidatorMock(big.NewInt(1), 1, []byte("pk0"), []byte("addr0")) diff --git a/sharding/interface.go b/sharding/interface.go index cc86e2352ae..99e002bf863 100644 --- a/sharding/interface.go +++ b/sharding/interface.go @@ -46,3 +46,52 @@ type PublicKeysSelector interface { GetValidatorsRewardsAddresses(randomness []byte, round uint64, shardId uint32) ([]string, error) GetOwnPublicKey() []byte } + +// ArgsUpdateNodes holds the parameters required by the shuffler to generate a new nodes configuration +type ArgsUpdateNodes struct { + eligible map[uint32][]Validator + waiting map[uint32][]Validator + newNodes []Validator + leaving []Validator + rand []byte + nbShards uint32 +} + +// NodesShuffler provides shuffling functionality for nodes +type NodesShuffler interface { + UpdateParams(numNodesShard uint32, numNodesMeta uint32, hysteresis float32, adaptivity bool) + UpdateNodeLists(args ArgsUpdateNodes) (map[uint32][]Validator, map[uint32][]Validator, []Validator) +} + +//RaterHandler provides Rating Computation Capabilities for the Nodes Coordinator and ValidatorStatistics +type RaterHandler interface { + RatingReader + //GetStartRating gets the start rating values + GetStartRating() uint32 + //ComputeIncreaseProposer computes the new rating for the increaseLeader + ComputeIncreaseProposer(val uint32) uint32 + //ComputeDecreaseProposer computes the new rating for the decreaseLeader + ComputeDecreaseProposer(val uint32) uint32 + //ComputeIncreaseValidator computes the new rating for the increaseValidator + ComputeIncreaseValidator(val uint32) uint32 + //ComputeDecreaseValidator computes the new rating for the decreaseValidator + ComputeDecreaseValidator(val uint32) uint32 +} + +//RatingReader provides rating reading capabilities for the ratingHandler +type RatingReader interface { + //GetRating gets the rating for the public key + GetRating(string) uint32 + //GetRatings gets all the ratings as a map[pk] ratingValue + GetRatings([]string) map[string]uint32 + //IsInterfaceNil verifies if the interface is nil + IsInterfaceNil() bool +} + +//RatingReaderSetter provides the capabilities to set a RatingReader +type RatingReaderSetter interface { + //SetRatingReader sets the RatingReader to be used + SetRatingReader(RatingReader) + //IsInterfaceNil verifies if the interface is nil + IsInterfaceNil() bool +} diff --git a/sharding/mock/hasherMock.go b/sharding/mock/hasherMock.go index 0218936b5c0..b411defa1c8 100644 --- a/sharding/mock/hasherMock.go +++ b/sharding/mock/hasherMock.go @@ -11,7 +11,7 @@ type HasherMock struct { // Compute will output the SHA's equivalent of the input string func (sha *HasherMock) Compute(s string) []byte { h := sha256.New() - h.Write([]byte(s)) + _, _ = h.Write([]byte(s)) return h.Sum(nil) } @@ -30,8 +30,5 @@
func (sha *HasherMock) Size() int { // IsInterfaceNil returns true if there is no value under the interface func (sha *HasherMock) IsInterfaceNil() bool { - if sha == nil { - return true - } - return false + return sha == nil } diff --git a/sharding/mock/raterMock.go b/sharding/mock/raterMock.go new file mode 100644 index 00000000000..2e371fec154 --- /dev/null +++ b/sharding/mock/raterMock.go @@ -0,0 +1,40 @@ +package mock + +type RaterMock struct { + ComputeRatingCalled func(string, uint32) uint32 + GetRatingCalled func(string) uint32 + GetRatingsCalled func([]string) map[string]uint32 + GetStartRatingCalled func() uint32 +} + +func (rm *RaterMock) ComputeRating(ratingOptionKey string, previousValue uint32) uint32 { + if rm.ComputeRatingCalled != nil { + return rm.ComputeRatingCalled(ratingOptionKey, previousValue) + } + return 1 +} + +func (rm *RaterMock) GetRating(pk string) uint32 { + if rm.GetRatingCalled != nil { + return rm.GetRatingCalled(pk) + } + return 1 +} + +func (rm *RaterMock) GetRatings(pks []string) map[string]uint32 { + if rm.GetRatingsCalled != nil { + return rm.GetRatingsCalled(pks) + } + return make(map[string]uint32) +} + +func (rm *RaterMock) IsInterfaceNil() bool { + return rm == nil +} + +func (rm *RaterMock) GetStartRating() uint32 { + if rm.GetStartRatingCalled != nil { + return rm.GetStartRatingCalled() + } + return 5 +} diff --git a/sharding/xorValidatorShuffler.go b/sharding/xorValidatorShuffler.go new file mode 100644 index 00000000000..69e5c6e0912 --- /dev/null +++ b/sharding/xorValidatorShuffler.go @@ -0,0 +1,338 @@ +package sharding + +import ( + "sort" + "sync" +) + +// TODO: Decide if transaction load statistics will be used for limiting the number of shards +type randXORShuffler struct { + nodesShard uint32 + nodesMeta uint32 + shardHysteresis uint32 + metaHysteresis uint32 + adaptivity bool + mutShufflerParams sync.RWMutex +} + +// NewXorValidatorsShuffler creates a validator shuffler that uses a XOR between validator key and a given +// random number to do the shuffling +func NewXorValidatorsShuffler( + nodesShard uint32, + nodesMeta uint32, + hysteresis float32, + adaptivity bool, +) *randXORShuffler { + rxs := &randXORShuffler{} + + rxs.UpdateParams(nodesShard, nodesMeta, hysteresis, adaptivity) + + return rxs +} + +// UpdateParams updates the shuffler parameters +// Should be called when new params are agreed through governance +func (rxs *randXORShuffler) UpdateParams( + nodesShard uint32, + nodesMeta uint32, + hysteresis float32, + adaptivity bool, +) { + // TODO: are there constraints we want to enforce? e.g. min/max hysteresis + shardHysteresis := uint32(float32(nodesShard) * hysteresis) + metaHysteresis := uint32(float32(nodesMeta) * hysteresis) + + rxs.mutShufflerParams.Lock() + rxs.shardHysteresis = shardHysteresis + rxs.metaHysteresis = metaHysteresis + rxs.nodesShard = nodesShard + rxs.nodesMeta = nodesMeta + rxs.adaptivity = adaptivity + rxs.mutShufflerParams.Unlock() +} + +// UpdateNodeLists shuffles the nodes and returns the lists with the new nodes configuration +// The function needs to ensure that: +// 1. Old eligible nodes list will have up to shuffleOutThreshold percent nodes shuffled out from each shard +// 2. The leaving nodes are checked against the eligible nodes and waiting nodes and removed if present from the +// pools and leaving nodes list (if remaining nodes can still sustain the shard) +// 3. shuffledOutNodes = oldEligibleNodes + waitingListNodes - minNbNodesPerShard (for each shard) + 4.
Old waiting nodes list for each shard will be added to the remaining eligible nodes list +// 5. The new nodes are equally distributed among the existing shards into waiting lists +// 6. The shuffled out nodes are distributed among the existing shards into waiting lists. +// We may have three situations: +// a) In case (shuffled out nodes + new nodes) > (nbShards * perShardHysteresis + minNodesPerShard) then +// we need to prepare for a split event, so a higher percentage of nodes need to be directed to the shard +// that will be split. +// b) In case (shuffled out nodes + new nodes) < (nbShards * perShardHysteresis) then we can immediately +// execute the shard merge +// c) No change in the number of shards then nothing extra needs to be done +func (rxs *randXORShuffler) UpdateNodeLists(args ArgsUpdateNodes) (map[uint32][]Validator, map[uint32][]Validator, []Validator) { + var shuffledOutNodes []Validator + eligibleAfterReshard := copyValidatorMap(args.eligible) + waitingAfterReshard := copyValidatorMap(args.waiting) + + newNbShards := rxs.computeNewShards(args.eligible, args.waiting, args.newNodes, args.leaving, args.nbShards) + + rxs.mutShufflerParams.RLock() + canSplit := rxs.adaptivity && newNbShards > args.nbShards + canMerge := rxs.adaptivity && newNbShards < args.nbShards + rxs.mutShufflerParams.RUnlock() + + leavingNodes := args.leaving + + if canSplit { + eligibleAfterReshard, waitingAfterReshard = rxs.splitShards(args.eligible, args.waiting, newNbShards) + } + if canMerge { + eligibleAfterReshard, waitingAfterReshard = rxs.mergeShards(args.eligible, args.waiting, newNbShards) + } + + for shard, vList := range waitingAfterReshard { + nbToRemove := len(vList) + if len(leavingNodes) < nbToRemove { + nbToRemove = len(leavingNodes) + } + + vList, leavingNodes = removeValidatorsFromList(vList, leavingNodes, nbToRemove) + waitingAfterReshard[shard] = vList + } + + shuffledOutNodes, eligibleAfterReshard, leavingNodes = shuffleOutNodes( + eligibleAfterReshard, + waitingAfterReshard, + leavingNodes, + args.rand, + ) + promoteWaitingToEligible(eligibleAfterReshard, waitingAfterReshard) + distributeValidators(args.newNodes, waitingAfterReshard, args.rand, newNbShards+1) + distributeValidators(shuffledOutNodes, waitingAfterReshard, args.rand, newNbShards+1) + + return eligibleAfterReshard, waitingAfterReshard, leavingNodes +} + +// computeNewShards determines the new number of shards based on the number of nodes in the network +func (rxs *randXORShuffler) computeNewShards( + eligible map[uint32][]Validator, + waiting map[uint32][]Validator, + newNodes []Validator, + leavingNodes []Validator, + nbShards uint32, +) uint32 { + + nbEligible := 0 + nbWaiting := 0 + for shard := range eligible { + nbEligible += len(eligible[shard]) + nbWaiting += len(waiting[shard]) + } + + nodesNewEpoch := uint32(nbEligible + nbWaiting + len(newNodes) - len(leavingNodes)) + + rxs.mutShufflerParams.RLock() + maxNodesMeta := rxs.nodesMeta + rxs.metaHysteresis + maxNodesShard := rxs.nodesShard + rxs.shardHysteresis + nodesForSplit := (nbShards+1)*maxNodesShard + maxNodesMeta + nodesForMerge := nbShards*rxs.nodesShard + rxs.nodesMeta + rxs.mutShufflerParams.RUnlock() + + nbShardsNew := nbShards + if nodesNewEpoch > nodesForSplit { + nbNodesWithoutMaxMeta := nodesNewEpoch - maxNodesMeta + nbShardsNew = nbNodesWithoutMaxMeta / maxNodesShard + + return nbShardsNew + } + + if nodesNewEpoch < nodesForMerge { + return nbShardsNew - 1 + } + + return nbShardsNew +} + +// shuffleOutNodes shuffles the list of eligible 
validators in each shard and returns the array of shuffled out +// validators +func shuffleOutNodes( + eligible map[uint32][]Validator, + waiting map[uint32][]Validator, + leaving []Validator, + randomness []byte, +) ([]Validator, map[uint32][]Validator, []Validator) { + shuffledOut := make([]Validator, 0) + newEligible := make(map[uint32][]Validator) + var removed []Validator + + for shard, validators := range eligible { + + nodesToSelect := len(waiting[shard]) + + if len(validators) < nodesToSelect { + nodesToSelect = len(validators) + } + + validators, removed = removeValidatorsFromList(validators, leaving, nodesToSelect) + leaving, _ = removeValidatorsFromList(leaving, removed, len(removed)) + + nodesToSelect -= len(removed) + shardShuffledEligible := shuffleList(validators, randomness) + shardShuffledOut := shardShuffledEligible[:nodesToSelect] + shuffledOut = append(shuffledOut, shardShuffledOut...) + + newEligible[shard], _ = removeValidatorsFromList(validators, shardShuffledOut, len(shardShuffledOut)) + } + + return shuffledOut, newEligible, leaving +} + +// shuffleList returns a shuffled list of validators. +// The shuffling is done based by xor-ing the randomness with the +// public keys of validators and sorting the validators depending on +// the xor result. +func shuffleList(validators []Validator, randomness []byte) []Validator { + keys := make([]string, len(validators)) + mapValidators := make(map[string]Validator) + + for i, v := range validators { + keys[i] = string(xorBytes(v.PubKey(), randomness)) + mapValidators[keys[i]] = v + } + + sort.Strings(keys) + + result := make([]Validator, len(validators)) + for i := 0; i < len(validators); i++ { + result[i] = mapValidators[keys[i]] + } + + return result +} + +func removeValidatorsFromList( + validatorList []Validator, + validatorsToRemove []Validator, + maxToRemove int, +) ([]Validator, []Validator) { + resultedList := make([]Validator, 0) + resultedList = append(resultedList, validatorList...) + removed := make([]Validator, 0) + + for _, v2 := range validatorsToRemove { + for i, v1 := range resultedList { + if v1 == v2 { + resultedList = removeValidatorFromList(resultedList, i) + removed = append(removed, v1) + break + } + } + + if len(removed) == maxToRemove { + break + } + } + + return resultedList, removed +} + +// removeValidatorFromList replaces the element at given index with the last element in the slice and returns a slice +// with a decremented length.The order in the list is important as long as it is kept the same for all validators, +// so not critical to maintain the original order inside the list, as that would be slower. +// +// Attention: The slice given as parameter will have its element on position index swapped with the last element +func removeValidatorFromList(validatorList []Validator, index int) []Validator { + indexNotOK := index > len(validatorList)-1 || index < 0 + + if indexNotOK { + return validatorList + } + + validatorList[index] = validatorList[len(validatorList)-1] + return validatorList[:len(validatorList)-1] +} + +// xorBytes XORs two byte arrays up to the shortest length of the two, and returns the resulted XORed bytes. 
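+// e.g. xorBytes([]byte{0xFF, 0xAA}, []byte{0x0F}) returns []byte{0xF0}: only the first byte is XORed, since the second slice is shorter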
+func xorBytes(a []byte, b []byte) []byte { + lenA := len(a) + lenB := len(b) + minLen := lenA + + if lenB < minLen { + minLen = lenB + } + + result := make([]byte, minLen) + for i := 0; i < minLen; i++ { + result[i] = a[i] ^ b[i] + } + + return result +} + +// splitShards prepares for the shards split, or if already prepared does the split returning the resulting +// shards configuration for eligible and waiting lists +func (rxs *randXORShuffler) splitShards( + eligible map[uint32][]Validator, + waiting map[uint32][]Validator, + newNbShards uint32, +) (map[uint32][]Validator, map[uint32][]Validator) { + log.Error(ErrNotImplemented.Error()) + + // TODO: do the split + return copyValidatorMap(eligible), copyValidatorMap(waiting) +} + +// mergeShards merges the required shards, returning the resulting shards configuration for eligible and waiting lists +func (rxs *randXORShuffler) mergeShards( + eligible map[uint32][]Validator, + waiting map[uint32][]Validator, + newNbShards uint32, +) (map[uint32][]Validator, map[uint32][]Validator) { + log.Error(ErrNotImplemented.Error()) + + // TODO: do the merge + return copyValidatorMap(eligible), copyValidatorMap(waiting) +} + +// copyValidatorMap creates a copy for the Validators map, creating copies for each of the lists for each shard +func copyValidatorMap(validators map[uint32][]Validator) map[uint32][]Validator { + result := make(map[uint32][]Validator) + + for k, v := range validators { + elems := make([]Validator, 0) + result[k] = append(elems, v...) + } + + return result +} + +// promoteWaitingToEligible moves the validators in the waiting list to corresponding eligible list +func promoteWaitingToEligible(eligible map[uint32][]Validator, waiting map[uint32][]Validator) { + for k, v := range waiting { + eligible[k] = append(eligible[k], v...) 
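+ // reset the shard's waiting list once its validators have been appended to the eligible list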
+ waiting[k] = make([]Validator, 0) + } +} + +// distributeNewNodes distributes a list of validators to the given validators map +func distributeValidators( + validators []Validator, + destLists map[uint32][]Validator, + randomness []byte, + nbShardsPlusMeta uint32, +) { + // if there was a split or a merge, eligible map should already have a different nb of keys (shards) + shuffledValidators := shuffleList(validators, randomness) + var shardId uint32 + + if len(destLists) == 0 { + destLists = make(map[uint32][]Validator) + } + + for i, v := range shuffledValidators { + shardId = uint32(i) % nbShardsPlusMeta + if shardId == nbShardsPlusMeta-1 { + shardId = MetachainShardId + } + destLists[shardId] = append(destLists[shardId], v) + } +} diff --git a/sharding/xorValidatorShuffler_test.go b/sharding/xorValidatorShuffler_test.go new file mode 100644 index 00000000000..f03214e178d --- /dev/null +++ b/sharding/xorValidatorShuffler_test.go @@ -0,0 +1,776 @@ +package sharding + +import ( + "bytes" + "crypto/rand" + "reflect" + "sort" + "testing" + + "github.com/stretchr/testify/assert" +) + +var firstArray = []byte{0xFF, 0xFF, 0xAA, 0xAA, 0x00, 0x00} +var secondArray = []byte{0xFF, 0x00, 0xAA, 0x55, 0x00, 0xFF} +var expectedArray = []byte{0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF} + +func generateRandomByteArray(size int) []byte { + r := make([]byte, size) + _, _ = rand.Read(r) + + return r +} + +func generateValidatorList(number int) []Validator { + v := make([]Validator, number) + + for i := 0; i < number; i++ { + v[i] = &validator{ + pubKey: generateRandomByteArray(32), + } + } + + return v +} + +func generateValidatorMap( + nodesPerShard int, + nbShards uint32, +) map[uint32][]Validator { + validatorsMap := make(map[uint32][]Validator) + + for i := uint32(0); i < nbShards; i++ { + validatorsMap[i] = generateValidatorList(nodesPerShard) + } + + validatorsMap[MetachainShardId] = generateValidatorList(nodesPerShard) + + return validatorsMap +} + +func contains(a []Validator, b []Validator) bool { + var found bool + for _, va := range a { + found = false + for _, vb := range b { + if reflect.DeepEqual(va, vb) { + found = true + break + } + } + if !found { + return found + } + } + + return found +} + +func testRemoveValidators( + t *testing.T, + initialValidators []Validator, + validatorsToRemove []Validator, + remaining []Validator, + removed []Validator, + maxToRemove int, +) { + nbRemoved := maxToRemove + if nbRemoved > len(validatorsToRemove) { + nbRemoved = len(validatorsToRemove) + } + + assert.Equal(t, nbRemoved, len(removed)) + assert.Equal(t, len(initialValidators)-len(remaining), nbRemoved) + + all := append(remaining, removed...) + assert.True(t, contains(all, initialValidators)) + assert.Equal(t, len(initialValidators), len(all)) +} + +func testDistributeValidators( + t *testing.T, + initialMap map[uint32][]Validator, + resultedMap map[uint32][]Validator, + distributedNodes []Validator, +) { + totalResultingValidators := make([]Validator, 0) + totalLen := 0 + for _, valList := range resultedMap { + totalResultingValidators = append(totalResultingValidators, valList...) + totalLen += len(valList) + } + + totalValidators := make([]Validator, 0) + for _, valList := range initialMap { + totalValidators = append(totalValidators, valList...) + } + assert.Equal(t, len(totalValidators)+len(distributedNodes), totalLen) + + totalValidators = append(totalValidators, distributedNodes...) 
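+ // every initial validator and every distributed validator must be found in the resulting map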
+ assert.True(t, contains(totalResultingValidators, totalValidators)) +} + +func numberMatchingNodes(searchList []Validator, toFind []Validator) int { + nbFound := 0 + for _, v1 := range toFind { + for _, v2 := range searchList { + if v1 == v2 { + nbFound++ + break + } + } + } + + return nbFound +} + +func testLeaving( + t *testing.T, + eligible map[uint32][]Validator, + waiting map[uint32][]Validator, + prevLeaving []Validator, + newLeaving []Validator, +) (int, map[uint32]int) { + nbLeavingPerShard := make(map[uint32]int) + + nbLeavingFromEligible := 0 + for i, eligibleList := range eligible { + nbWantingToLeaveFromList := numberMatchingNodes(eligibleList, prevLeaving) + maxAllowedToLeaveFromList := len(waiting[i]) + nbLeaving := nbWantingToLeaveFromList + if nbLeaving > maxAllowedToLeaveFromList { + nbLeaving = maxAllowedToLeaveFromList + } + + nbLeavingPerShard[i] += nbLeaving + nbLeavingFromEligible += nbLeaving + } + assert.Equal(t, nbLeavingFromEligible, len(prevLeaving)-len(newLeaving)) + + return nbLeavingFromEligible, nbLeavingPerShard +} + +func testShuffledOut( + t *testing.T, + eligibleMap map[uint32][]Validator, + waitingMap map[uint32][]Validator, + newEligible map[uint32][]Validator, + shuffledOut []Validator, + prevleaving []Validator, + newleaving []Validator, +) { + nbAllLeaving, _ := testLeaving(t, eligibleMap, waitingMap, prevleaving, newleaving) + allWaiting := getValidatorsInMap(waitingMap) + allEligible := getValidatorsInMap(eligibleMap) + assert.Equal(t, len(shuffledOut)+nbAllLeaving, len(allWaiting)) + + allNewEligible := getValidatorsInMap(newEligible) + assert.Equal(t, len(allEligible)-len(shuffledOut)-nbAllLeaving, len(allNewEligible)) + + newNodes := append(allNewEligible, shuffledOut...) + assert.NotEqual(t, allEligible, newNodes) + assert.True(t, contains(newNodes, allEligible)) +} + +func createDefaultXorShuffler() *randXORShuffler { + return NewXorValidatorsShuffler( + 100, + 100, + 0.2, + false, + ) +} + +func getValidatorsInMap(valMap map[uint32][]Validator) []Validator { + result := make([]Validator, 0) + + for _, valList := range valMap { + result = append(result, valList...) 
+ } + + return result +} + +func Test_xorBytes_SameLen(t *testing.T) { + t.Parallel() + + result := xorBytes(firstArray, secondArray) + + assert.Equal(t, expectedArray, result) +} + +func Test_xorBytes_FirstLowerLen(t *testing.T) { + t.Parallel() + + result := xorBytes(firstArray[:len(firstArray)-1], secondArray) + + assert.Equal(t, expectedArray[:len(expectedArray)-1], result) +} + +func Test_xorBytes_SecondLowerLen(t *testing.T) { + t.Parallel() + + result := xorBytes(firstArray, secondArray[:len(secondArray)-1]) + + assert.Equal(t, expectedArray[:len(expectedArray)-1], result) +} + +func Test_xorBytes_FirstEmpty(t *testing.T) { + t.Parallel() + + result := xorBytes([]byte{}, secondArray) + + assert.Equal(t, []byte{}, result) +} + +func Test_xorBytes_SecondEmpty(t *testing.T) { + result := xorBytes(firstArray, []byte{}) + + assert.Equal(t, []byte{}, result) +} + +func Test_xorBytes_FirstNil(t *testing.T) { + t.Parallel() + + result := xorBytes(nil, secondArray) + + assert.Equal(t, []byte{}, result) +} + +func Test_xorBytes_SecondNil(t *testing.T) { + t.Parallel() + + result := xorBytes(firstArray, nil) + + assert.Equal(t, []byte{}, result) +} + +func Test_copyValidatorMap(t *testing.T) { + t.Parallel() + + valMap := generateValidatorMap(30, 2) + v2 := copyValidatorMap(valMap) + assert.Equal(t, valMap, v2) + + valMap[0] = valMap[0][1:] + assert.NotEqual(t, valMap, v2) +} + +func Test_promoteWaitingToEligibleEmptyList(t *testing.T) { + t.Parallel() + + eligibleMap := generateValidatorMap(30, 2) + waitingMap := generateValidatorMap(0, 2) + eligibleMapCopy := copyValidatorMap(eligibleMap) + + for k := range eligibleMap { + assert.Equal(t, eligibleMap[k], eligibleMapCopy[k]) + assert.Empty(t, waitingMap[k]) + } +} + +func Test_promoteWaitingToEligible(t *testing.T) { + t.Parallel() + + eligibleMap := generateValidatorMap(30, 2) + waitingMap := generateValidatorMap(22, 2) + + eligibleMapCopy := copyValidatorMap(eligibleMap) + waitingMapCopy := copyValidatorMap(waitingMap) + + promoteWaitingToEligible(eligibleMap, waitingMap) + + for k := range eligibleMap { + assert.Equal(t, eligibleMap[k], append(eligibleMapCopy[k], waitingMapCopy[k]...)) + assert.Empty(t, waitingMap[k]) + } +} + +func Test_removeValidatorFromListFirst(t *testing.T) { + t.Parallel() + + validators := generateValidatorList(30) + validatorsCopy := make([]Validator, len(validators)) + _ = copy(validatorsCopy, validators) + + v := removeValidatorFromList(validators, 0) + assert.Equal(t, validatorsCopy[len(validatorsCopy)-1], v[0]) + assert.NotEqual(t, validatorsCopy[0], v[0]) + assert.Equal(t, len(validatorsCopy)-1, len(v)) + + for i := 1; i < len(v); i++ { + assert.Equal(t, validatorsCopy[i], v[i]) + } +} + +func Test_removeValidatorFromListLast(t *testing.T) { + t.Parallel() + + validators := generateValidatorList(30) + validatorsCopy := make([]Validator, len(validators)) + _ = copy(validatorsCopy, validators) + + v := removeValidatorFromList(validators, len(validators)-1) + assert.Equal(t, len(validatorsCopy)-1, len(v)) + assert.Equal(t, validatorsCopy[:len(validatorsCopy)-1], v) +} + +func Test_removeValidatorFromListMiddle(t *testing.T) { + t.Parallel() + + validators := generateValidatorList(30) + validatorsCopy := make([]Validator, len(validators)) + _ = copy(validatorsCopy, validators) + + v := removeValidatorFromList(validators, len(validators)/2) + assert.Equal(t, len(validatorsCopy)-1, len(v)) + assert.Equal(t, validatorsCopy[len(validatorsCopy)-1], v[len(validatorsCopy)/2]) +} + +func 
Test_removeValidatorFromListIndexNegativeNoAction(t *testing.T) { + t.Parallel() + + validators := generateValidatorList(30) + validatorsCopy := make([]Validator, len(validators)) + _ = copy(validatorsCopy, validators) + + v := removeValidatorFromList(validators, -1) + assert.Equal(t, len(validatorsCopy), len(v)) + assert.Equal(t, validatorsCopy, v) +} + +func Test_removeValidatorFromListIndexTooBigNoAction(t *testing.T) { + t.Parallel() + + validators := generateValidatorList(30) + validatorsCopy := make([]Validator, len(validators)) + _ = copy(validatorsCopy, validators) + + v := removeValidatorFromList(validators, len(validators)) + assert.Equal(t, len(validatorsCopy), len(v)) + assert.Equal(t, validatorsCopy, v) +} + +func Test_removeValidatorsFromListRemoveFromStart(t *testing.T) { + t.Parallel() + + validatorsToRemoveFromStart := 3 + validators := generateValidatorList(30) + validatorsCopy := make([]Validator, len(validators)) + validatorsToRemove := make([]Validator, 0) + + _ = copy(validatorsCopy, validators) + validatorsToRemove = append(validatorsToRemove, validators[:validatorsToRemoveFromStart]...) + + v, removed := removeValidatorsFromList(validators, validatorsToRemove, len(validatorsToRemove)) + testRemoveValidators(t, validatorsCopy, validatorsToRemove, v, removed, len(validatorsToRemove)) +} + +func Test_removeValidatorsFromListRemoveFromLast(t *testing.T) { + t.Parallel() + + validatorsToRemoveFromEnd := 3 + validators := generateValidatorList(30) + validatorsCopy := make([]Validator, len(validators)) + validatorsToRemove := make([]Validator, 0) + + _ = copy(validatorsCopy, validators) + validatorsToRemove = append(validatorsToRemove, validators[len(validators)-validatorsToRemoveFromEnd:]...) + + v, removed := removeValidatorsFromList(validators, validatorsToRemove, len(validatorsToRemove)) + testRemoveValidators(t, validatorsCopy, validatorsToRemove, v, removed, len(validatorsToRemove)) +} + +func Test_removeValidatorsFromListRemoveFromFirstMaxSmaller(t *testing.T) { + t.Parallel() + + validatorsToRemoveFromStart := 3 + validators := generateValidatorList(30) + validatorsCopy := make([]Validator, len(validators)) + validatorsToRemove := make([]Validator, 0) + maxToRemove := validatorsToRemoveFromStart - 1 + + _ = copy(validatorsCopy, validators) + validatorsToRemove = append(validatorsToRemove, validators[:validatorsToRemoveFromStart]...) + + v, removed := removeValidatorsFromList(validators, validatorsToRemove, maxToRemove) + testRemoveValidators(t, validatorsCopy, validatorsToRemove, v, removed, maxToRemove) +} + +func Test_removeValidatorsFromListRemoveFromFirstMaxGreater(t *testing.T) { + t.Parallel() + + validatorsToRemoveFromStart := 3 + validators := generateValidatorList(30) + validatorsCopy := make([]Validator, len(validators)) + validatorsToRemove := make([]Validator, 0) + maxToRemove := validatorsToRemoveFromStart + 1 + + _ = copy(validatorsCopy, validators) + validatorsToRemove = append(validatorsToRemove, validators[:validatorsToRemoveFromStart]...) 
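+	// maxToRemove exceeds the number of requested removals, so all 3 validators taken
+	// from the start of the list are expected to be removed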
+ + v, removed := removeValidatorsFromList(validators, validatorsToRemove, maxToRemove) + testRemoveValidators(t, validatorsCopy, validatorsToRemove, v, removed, maxToRemove) +} + +func Test_removeValidatorsFromListRemoveFromLastMaxSmaller(t *testing.T) { + t.Parallel() + + validatorsToRemoveFromEnd := 3 + validators := generateValidatorList(30) + validatorsCopy := make([]Validator, len(validators)) + validatorsToRemove := make([]Validator, 0) + maxToRemove := validatorsToRemoveFromEnd - 1 + + _ = copy(validatorsCopy, validators) + validatorsToRemove = append(validatorsToRemove, validators[len(validators)-validatorsToRemoveFromEnd:]...) + assert.Equal(t, validatorsToRemoveFromEnd, len(validatorsToRemove)) + + v, removed := removeValidatorsFromList(validators, validatorsToRemove, maxToRemove) + testRemoveValidators(t, validatorsCopy, validatorsToRemove, v, removed, maxToRemove) +} + +func Test_removeValidatorsFromListRemoveFromLastMaxGreater(t *testing.T) { + t.Parallel() + + validatorsToRemoveFromEnd := 3 + validators := generateValidatorList(30) + validatorsCopy := make([]Validator, len(validators)) + validatorsToRemove := make([]Validator, 0) + maxToRemove := validatorsToRemoveFromEnd + 1 + + _ = copy(validatorsCopy, validators) + validatorsToRemove = append(validatorsToRemove, validators[len(validators)-validatorsToRemoveFromEnd:]...) + assert.Equal(t, validatorsToRemoveFromEnd, len(validatorsToRemove)) + + v, removed := removeValidatorsFromList(validators, validatorsToRemove, maxToRemove) + testRemoveValidators(t, validatorsCopy, validatorsToRemove, v, removed, maxToRemove) +} + +func Test_removeValidatorsFromListRandomValidatorsMaxSmaller(t *testing.T) { + t.Parallel() + + nbValidatotrsToRemove := 10 + maxToRemove := nbValidatotrsToRemove - 3 + validators := generateValidatorList(30) + validatorsCopy := make([]Validator, len(validators)) + validatorsToRemove := make([]Validator, 0) + + _ = copy(validatorsCopy, validators) + + sort.Slice(validators, func(i, j int) bool { + return bytes.Compare(validators[i].PubKey(), validators[j].PubKey()) < 0 + }) + + validatorsToRemove = append(validatorsToRemove, validators[:nbValidatotrsToRemove]...) + + v, removed := removeValidatorsFromList(validators, validatorsToRemove, maxToRemove) + testRemoveValidators(t, validatorsCopy, validatorsToRemove, v, removed, maxToRemove) +} + +func Test_removeValidatorsFromListRandomValidatorsMaxGreater(t *testing.T) { + t.Parallel() + + nbValidatotrsToRemove := 10 + maxToRemove := nbValidatotrsToRemove + 3 + validators := generateValidatorList(30) + validatorsCopy := make([]Validator, len(validators)) + validatorsToRemove := make([]Validator, 0) + + _ = copy(validatorsCopy, validators) + + sort.Slice(validators, func(i, j int) bool { + return bytes.Compare(validators[i].PubKey(), validators[j].PubKey()) < 0 + }) + + validatorsToRemove = append(validatorsToRemove, validators[:nbValidatotrsToRemove]...) + + v, removed := removeValidatorsFromList(validators, validatorsToRemove, maxToRemove) + testRemoveValidators(t, validatorsCopy, validatorsToRemove, v, removed, maxToRemove) +} + +func Test_shuffleList(t *testing.T) { + t.Parallel() + + randomness := generateRandomByteArray(32) + validators := generateValidatorList(30) + validatorsCopy := make([]Validator, 0) + validatorsCopy = append(validatorsCopy, validators...) 
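+	// shuffleList should preserve membership (hence the contains check below) while,
+	// with overwhelming probability for 30 validators, producing a different ordering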
+ + shuffled := shuffleList(validators, randomness) + assert.Equal(t, len(validatorsCopy), len(shuffled)) + assert.NotEqual(t, validatorsCopy, shuffled) + assert.True(t, contains(shuffled, validatorsCopy)) +} + +func Test_shuffleListParameterNotChanged(t *testing.T) { + t.Parallel() + + randomness := generateRandomByteArray(32) + validators := generateValidatorList(30) + validatorsCopy := make([]Validator, len(validators)) + _ = copy(validatorsCopy, validators) + + _ = shuffleList(validators, randomness) + assert.Equal(t, validatorsCopy, validators) +} + +func Test_shuffleListConsistentShuffling(t *testing.T) { + t.Parallel() + + randomness := generateRandomByteArray(32) + validators := generateValidatorList(30) + + nbTrials := 10 + shuffled := shuffleList(validators, randomness) + for i := 0; i < nbTrials; i++ { + shuffled2 := shuffleList(validators, randomness) + assert.Equal(t, shuffled, shuffled2) + } +} + +func Test_distributeValidatorsEqualNumber(t *testing.T) { + t.Parallel() + + randomness := generateRandomByteArray(32) + nodesPerShard := 30 + newNodesPerShard := 10 + validatorsMap := generateValidatorMap(nodesPerShard, 2) + validatorsCopy := copyValidatorMap(validatorsMap) + + nbLists := len(validatorsMap) + validatorsToDistribute := generateValidatorList(nbLists * newNodesPerShard) + distributeValidators(validatorsToDistribute, validatorsMap, randomness, uint32(newNodesPerShard+1)) + testDistributeValidators(t, validatorsCopy, validatorsMap, validatorsToDistribute) +} + +func Test_distributeValidatorsEqualNumberConsistent(t *testing.T) { + t.Parallel() + + randomness := generateRandomByteArray(32) + nodesPerShard := 30 + newNodesPerShard := 10 + validatorsMap := generateValidatorMap(nodesPerShard, 2) + validatorsCopy := copyValidatorMap(validatorsMap) + + nbLists := len(validatorsMap) + validatorsToDistribute := generateValidatorList(nbLists * newNodesPerShard) + distributeValidators(validatorsToDistribute, validatorsMap, randomness, uint32(newNodesPerShard+1)) + testDistributeValidators(t, validatorsCopy, validatorsMap, validatorsToDistribute) + + distributeValidators(validatorsToDistribute, validatorsCopy, randomness, uint32(newNodesPerShard+1)) + for i := range validatorsCopy { + assert.Equal(t, validatorsMap[i], validatorsCopy[i]) + } +} + +func Test_distributeValidatorsUnequalNumber(t *testing.T) { + t.Parallel() + + randomness := generateRandomByteArray(32) + nodesPerShard := 30 + nbShards := uint32(2) + validatorsMap := generateValidatorMap(nodesPerShard, nbShards) + validatorsCopy := copyValidatorMap(validatorsMap) + + nbLists := len(validatorsMap) + maxNewNodesPerShard := 10 + newNodes := nbLists*maxNewNodesPerShard - 1 + validatorsToDistribute := generateValidatorList(nbLists*newNodes - 1) + distributeValidators(validatorsToDistribute, validatorsMap, randomness, nbShards+1) + testDistributeValidators(t, validatorsCopy, validatorsMap, validatorsToDistribute) +} + +func Test_distributeValidatorsUnequalNumberConsistent(t *testing.T) { + t.Parallel() + + randomness := generateRandomByteArray(32) + nodesPerShard := 30 + nbShards := uint32(2) + validatorsMap := generateValidatorMap(nodesPerShard, nbShards) + validatorsCopy := copyValidatorMap(validatorsMap) + + nbLists := len(validatorsMap) + maxNewNodesPerShard := 10 + newNodes := nbLists*maxNewNodesPerShard - 1 + validatorsToDistribute := generateValidatorList(nbLists*newNodes - 1) + distributeValidators(validatorsToDistribute, validatorsMap, randomness, nbShards+1) + testDistributeValidators(t, validatorsCopy, 
validatorsMap, validatorsToDistribute) + + distributeValidators(validatorsToDistribute, validatorsCopy, randomness, nbShards+1) + for i := range validatorsCopy { + assert.Equal(t, validatorsMap[i], validatorsCopy[i]) + } +} + +func Test_shuffleOutNodesNoLeaving(t *testing.T) { + t.Parallel() + + randomness := generateRandomByteArray(32) + eligibleNodesPerShard := 100 + waitingNodesPerShard := 40 + nbShards := uint32(2) + var leaving []Validator + + eligibleMap := generateValidatorMap(eligibleNodesPerShard, nbShards) + waitingMap := generateValidatorMap(waitingNodesPerShard, nbShards) + + shuffledOut, newEligible, newLeaving := shuffleOutNodes(eligibleMap, waitingMap, leaving, randomness) + testShuffledOut(t, eligibleMap, waitingMap, newEligible, shuffledOut, leaving, newLeaving) +} + +func Test_shuffleOutNodesWithLeaving(t *testing.T) { + t.Parallel() + + randomness := generateRandomByteArray(32) + eligibleNodesPerShard := 100 + waitingNodesPerShard := 40 + nbShards := uint32(2) + leaving := make([]Validator, 0) + + eligibleMap := generateValidatorMap(eligibleNodesPerShard, nbShards) + waitingMap := generateValidatorMap(waitingNodesPerShard, nbShards) + for _, valList := range eligibleMap { + leaving = append(leaving, valList[:len(valList)/5]...) + } + + shuffledOut, newEligible, newLeaving := shuffleOutNodes(eligibleMap, waitingMap, leaving, randomness) + testShuffledOut(t, eligibleMap, waitingMap, newEligible, shuffledOut, leaving, newLeaving) +} + +func Test_shuffleOutNodesWithLeavingMoreThanWaiting(t *testing.T) { + t.Parallel() + + randomness := generateRandomByteArray(32) + eligibleNodesPerShard := 100 + waitingNodesPerShard := 40 + nbShards := uint32(2) + leaving := make([]Validator, 0) + + eligibleMap := generateValidatorMap(eligibleNodesPerShard, nbShards) + waitingMap := generateValidatorMap(waitingNodesPerShard, nbShards) + for _, valList := range eligibleMap { + leaving = append(leaving, valList[:len(valList)/2]...) 
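+		// half of each 100-node eligible list (50 validators) asks to leave, which exceeds
+		// the 40-node waiting list, so the actual departures per shard should be capped at
+		// the waiting list size (see testLeaving)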
+ } + + shuffledOut, newEligible, newLeaving := shuffleOutNodes(eligibleMap, waitingMap, leaving, randomness) + testShuffledOut(t, eligibleMap, waitingMap, newEligible, shuffledOut, leaving, newLeaving) +} + +func TestNewXorValidatorsShuffler(t *testing.T) { + t.Parallel() + + shuffler := NewXorValidatorsShuffler( + 100, + 100, + 0.2, + false, + ) + + assert.NotNil(t, shuffler) +} + +func TestRandXORShuffler_computeNewShardsNotChanging(t *testing.T) { + t.Parallel() + + currentNbShards := uint32(3) + shuffler := createDefaultXorShuffler() + eligible := generateValidatorMap(int(shuffler.nodesShard), currentNbShards) + nbShards := currentNbShards + 1 // account for meta + maxNodesNoSplit := (nbShards + 1) * (shuffler.nodesShard + shuffler.shardHysteresis) + nbWaitingPerShard := int(maxNodesNoSplit/nbShards - shuffler.nodesShard) + waiting := generateValidatorMap(nbWaitingPerShard, currentNbShards) + newNodes := generateValidatorList(0) + leaving := generateValidatorList(0) + + newNbShards := shuffler.computeNewShards(eligible, waiting, newNodes, leaving, currentNbShards) + assert.Equal(t, currentNbShards, newNbShards) +} + +func TestRandXORShuffler_computeNewShardsWithSplit(t *testing.T) { + t.Parallel() + + currentNbShards := uint32(3) + shuffler := createDefaultXorShuffler() + eligible := generateValidatorMap(int(shuffler.nodesShard), currentNbShards) + nbShards := currentNbShards + 1 // account for meta + maxNodesNoSplit := (nbShards + 1) * (shuffler.nodesShard + shuffler.shardHysteresis) + nbWaitingPerShard := int(maxNodesNoSplit/nbShards-shuffler.nodesShard) + 1 + waiting := generateValidatorMap(nbWaitingPerShard, currentNbShards) + newNodes := generateValidatorList(0) + leaving := generateValidatorList(0) + + newNbShards := shuffler.computeNewShards(eligible, waiting, newNodes, leaving, currentNbShards) + assert.Equal(t, currentNbShards+1, newNbShards) +} + +func TestRandXORShuffler_computeNewShardsWithMerge(t *testing.T) { + t.Parallel() + + currentNbShards := uint32(3) + shuffler := createDefaultXorShuffler() + eligible := generateValidatorMap(int(shuffler.nodesShard), currentNbShards) + nbWaitingPerShard := 0 + waiting := generateValidatorMap(nbWaitingPerShard, currentNbShards) + newNodes := generateValidatorList(0) + leaving := generateValidatorList(1) + + newNbShards := shuffler.computeNewShards(eligible, waiting, newNodes, leaving, currentNbShards) + assert.Equal(t, currentNbShards-1, newNbShards) +} + +func TestRandXORShuffler_UpdateParams(t *testing.T) { + t.Parallel() + + shuffler := createDefaultXorShuffler() + shuffler2 := &randXORShuffler{ + nodesShard: 200, + nodesMeta: 200, + shardHysteresis: 0, + metaHysteresis: 0, + adaptivity: true, + } + + shuffler.UpdateParams( + shuffler2.nodesShard, + shuffler2.nodesMeta, + 0, + shuffler2.adaptivity, + ) + + assert.Equal(t, shuffler2, shuffler) +} + +func TestRandXORShuffler_UpdateNodeListsNoReSharding(t *testing.T) { + t.Parallel() + + shuffler := createDefaultXorShuffler() + + eligiblePerShard := int(shuffler.nodesShard) + waitingPerShard := 30 + nbShards := uint32(3) + randomness := generateRandomByteArray(32) + + leavingNodes := make([]Validator, 0) + newNodes := make([]Validator, 0) + + eligibleMap := generateValidatorMap(eligiblePerShard, nbShards) + waitingMap := generateValidatorMap(waitingPerShard, nbShards) + + args := ArgsUpdateNodes{ + eligible: eligibleMap, + waiting: waitingMap, + newNodes: newNodes, + leaving: leavingNodes, + rand: randomness, + } + + eligible, waiting, _ := shuffler.UpdateNodeLists(args) + + 
allPrevEligible := getValidatorsInMap(eligibleMap) + allNewEligible := getValidatorsInMap(eligible) + allPrevWaiting := getValidatorsInMap(waitingMap) + allNewWaiting := getValidatorsInMap(waiting) + + assert.Equal(t, len(allPrevEligible)+len(allPrevWaiting), len(allNewEligible)+len(allNewWaiting)) +} diff --git a/statusHandler/export_test.go b/statusHandler/export_test.go deleted file mode 100644 index f7fe9bc9458..00000000000 --- a/statusHandler/export_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package statusHandler - -import ( - "errors" - - "github.com/prometheus/client_golang/prometheus" -) - -func (psh *PrometheusStatusHandler) GetPrometheusMetricByKey(key string) (prometheus.Gauge, error) { - value, ok := psh.prometheusGaugeMetrics.Load(key) - if ok { - return value.(prometheus.Gauge), nil - } - return nil, errors.New("metric does not exist") -} diff --git a/statusHandler/presenter/blockInfoGetters.go b/statusHandler/presenter/blockInfoGetters.go index aa6adcd9d8b..bca03bcdc9e 100644 --- a/statusHandler/presenter/blockInfoGetters.go +++ b/statusHandler/presenter/blockInfoGetters.go @@ -32,6 +32,11 @@ func (psh *PresenterStatusHandler) GetCurrentBlockHash() string { return psh.getFromCacheAsString(core.MetricCurrentBlockHash) } +// GetEpochNumber will return current epoch +func (psh *PresenterStatusHandler) GetEpochNumber() uint64 { + return psh.getFromCacheAsUint64(core.MetricEpochNumber) +} + // GetCurrentRoundTimestamp will return current round timestamp func (psh *PresenterStatusHandler) GetCurrentRoundTimestamp() uint64 { return psh.getFromCacheAsUint64(core.MetricCurrentRoundTimestamp) diff --git a/statusHandler/prometheusHandler.go b/statusHandler/prometheusHandler.go deleted file mode 100644 index ca165fa7207..00000000000 --- a/statusHandler/prometheusHandler.go +++ /dev/null @@ -1,108 +0,0 @@ -package statusHandler - -import ( - "sync" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/prometheus/client_golang/prometheus" -) - -// PrometheusStatusHandler will define the handler which will update prometheus metrics -type PrometheusStatusHandler struct { - prometheusGaugeMetrics sync.Map -} - -// InitMetricsMap will init the map of prometheus metrics -func (psh *PrometheusStatusHandler) InitMetricsMap() { - psh.prometheusGaugeMetrics = sync.Map{} -} - -// will create a prometheus gauge and add it to the sync map -func (psh *PrometheusStatusHandler) addMetric(name string, help string) { - metric := prometheus.NewGauge(prometheus.GaugeOpts{ - Name: name, - Help: help, - }) - psh.prometheusGaugeMetrics.Store(name, metric) -} - -// InitMetrics will declare and init all the metrics which should be used for Prometheus -func (psh *PrometheusStatusHandler) InitMetrics() { - psh.InitMetricsMap() - - psh.addMetric(core.MetricSynchronizedRound, "The round where the synchronized blockchain is") - psh.addMetric(core.MetricNonce, "The nonce for the node") - psh.addMetric(core.MetricCurrentRound, "The current round where the node is") - psh.addMetric(core.MetricNumConnectedPeers, "The current number of peers connected") - psh.addMetric(core.MetricIsSyncing, "The synchronization state. 
If it's in process of syncing will be 1"+ - " and if it's synchronized will be 0") - - psh.prometheusGaugeMetrics.Range(func(key, value interface{}) bool { - gauge := value.(prometheus.Gauge) - _ = prometheus.Register(gauge) - return true - }) -} - -// NewPrometheusStatusHandler will return an instance of a PrometheusStatusHandler -func NewPrometheusStatusHandler() *PrometheusStatusHandler { - psh := new(PrometheusStatusHandler) - psh.InitMetrics() - return psh -} - -// IsInterfaceNil returns true if there is no value under the interface -func (psh *PrometheusStatusHandler) IsInterfaceNil() bool { - if psh == nil { - return true - } - return false -} - -// Increment will be used for incrementing the value for a key -func (psh *PrometheusStatusHandler) Increment(key string) { - if metric, ok := psh.prometheusGaugeMetrics.Load(key); ok { - metric.(prometheus.Gauge).Inc() - } -} - -// AddUint64 will be used for increase the value for a key with a specific value -func (psh *PrometheusStatusHandler) AddUint64(key string, val uint64) { - if metric, ok := psh.prometheusGaugeMetrics.Load(key); ok { - metric.(prometheus.Gauge).Add(float64(val)) - } -} - -// Decrement will be used for decrementing the value for a key -func (psh *PrometheusStatusHandler) Decrement(key string) { - if metric, ok := psh.prometheusGaugeMetrics.Load(key); ok { - metric.(prometheus.Gauge).Dec() - } -} - -// SetInt64Value method - will update the value for a key -func (psh *PrometheusStatusHandler) SetInt64Value(key string, value int64) { - if metric, ok := psh.prometheusGaugeMetrics.Load(key); ok { - metric.(prometheus.Gauge).Set(float64(value)) - } -} - -// SetUInt64Value method - will update the value for a key -func (psh *PrometheusStatusHandler) SetUInt64Value(key string, value uint64) { - if metric, ok := psh.prometheusGaugeMetrics.Load(key); ok { - metric.(prometheus.Gauge).Set(float64(value)) - } -} - -// SetStringValue method - will update the value for a key -func (psh *PrometheusStatusHandler) SetStringValue(key string, value string) { -} - -// Close will unregister Prometheus metrics -func (psh *PrometheusStatusHandler) Close() { - psh.prometheusGaugeMetrics.Range(func(key, value interface{}) bool { - gauge := value.(prometheus.Gauge) - prometheus.Unregister(gauge) - return true - }) -} diff --git a/statusHandler/prometheusHandler_test.go b/statusHandler/prometheusHandler_test.go deleted file mode 100644 index afad1eb6c2a..00000000000 --- a/statusHandler/prometheusHandler_test.go +++ /dev/null @@ -1,174 +0,0 @@ -package statusHandler_test - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/statusHandler" - "github.com/prometheus/client_golang/prometheus/promhttp" - prometheusUtils "github.com/prometheus/client_golang/prometheus/testutil" - "github.com/stretchr/testify/assert" -) - -func TestPrometheusStatusHandler_NewPrometheusStatusHandler(t *testing.T) { - t.Parallel() - - var promStatusHandler core.AppStatusHandler - promStatusHandler = statusHandler.NewPrometheusStatusHandler() - assert.NotNil(t, promStatusHandler) -} - -func TestPrometheusStatusHandler_TestIfMetricsAreInitialized(t *testing.T) { - t.Parallel() - - promStatusHandler := statusHandler.NewPrometheusStatusHandler() - - // check if nonce metric for example was initialized - _, err := promStatusHandler.GetPrometheusMetricByKey(core.MetricNumConnectedPeers) - assert.Nil(t, err) -} - -func TestPrometheusStatusHandler_TestIncrement(t *testing.T) { - 
t.Parallel() - - var metricKey = core.MetricNonce - - promStatusHandler := statusHandler.NewPrometheusStatusHandler() - - // increment the nonce metric - promStatusHandler.Increment(metricKey) - - // get the gauge - gauge, err := promStatusHandler.GetPrometheusMetricByKey(metricKey) - assert.Nil(t, err) - - result := prometheusUtils.ToFloat64(gauge) - // test if the metric was incremented - assert.Equal(t, float64(1), result) -} - -func TestPrometheusStatusHandler_TestDecrement(t *testing.T) { - t.Parallel() - - var metricKey = core.MetricNonce - - promStatusHandler := statusHandler.NewPrometheusStatusHandler() - - // get the gauge - gauge, err := promStatusHandler.GetPrometheusMetricByKey(metricKey) - assert.Nil(t, err) - - // now decrement the metric - promStatusHandler.Decrement(metricKey) - - result := prometheusUtils.ToFloat64(gauge) - - assert.Equal(t, float64(-1), result) -} - -func TestPrometheusStatusHandler_TestSetInt64Value(t *testing.T) { - t.Parallel() - - var metricKey = core.MetricCurrentRound - - promStatusHandler := statusHandler.NewPrometheusStatusHandler() - - // set an int64 value - promStatusHandler.SetInt64Value(metricKey, int64(10)) - - gauge, err := promStatusHandler.GetPrometheusMetricByKey(metricKey) - assert.Nil(t, err) - - result := prometheusUtils.ToFloat64(gauge) - // test if the metric value was updated - assert.Equal(t, float64(10), result) -} - -func TestPrometheusStatusHandler_TestSetUInt64Value(t *testing.T) { - t.Parallel() - - var metricKey = core.MetricCurrentRound - - promStatusHandler := statusHandler.NewPrometheusStatusHandler() - - // set an uint64 value - promStatusHandler.SetUInt64Value(metricKey, uint64(20)) - - gauge, err := promStatusHandler.GetPrometheusMetricByKey(metricKey) - assert.Nil(t, err) - - result := prometheusUtils.ToFloat64(gauge) - // test if the metric value was updated - assert.Equal(t, float64(20), result) -} - -func BenchmarkPrometheusStatusHandler_Increment(b *testing.B) { - var promStatusHandler core.AppStatusHandler - promStatusHandler = statusHandler.NewPrometheusStatusHandler() - - testServer := httptest.NewServer(promhttp.Handler()) - defer testServer.Close() - b.ResetTimer() - - _, err := http.Get(testServer.URL) - assert.Nil(b, err) - - for n := 0; n < b.N; n++ { - promStatusHandler.Increment(core.MetricIsSyncing) - } - promStatusHandler.Close() -} - -func BenchmarkPrometheusStatusHandler_Decrement(b *testing.B) { - var promStatusHandler core.AppStatusHandler - promStatusHandler = statusHandler.NewPrometheusStatusHandler() - - testServer := httptest.NewServer(promhttp.Handler()) - defer testServer.Close() - b.ResetTimer() - - _, err := http.Get(testServer.URL) - assert.Nil(b, err) - - for n := 0; n < b.N; n++ { - promStatusHandler.Decrement(core.MetricIsSyncing) - } - promStatusHandler.Close() -} - -func BenchmarkPrometheusStatusHandler_SetInt64Value(b *testing.B) { - var promStatusHandler core.AppStatusHandler - - promStatusHandler = statusHandler.NewPrometheusStatusHandler() - - testServer := httptest.NewServer(promhttp.Handler()) - defer testServer.Close() - b.ResetTimer() - - _, err := http.Get(testServer.URL) - assert.Nil(b, err) - - for n := 0; n < b.N; n++ { - promStatusHandler.SetInt64Value(core.MetricIsSyncing, int64(10)) - } - promStatusHandler.Close() -} - -func BenchmarkPrometheusStatusHandler_SetUInt64Value(b *testing.B) { - var promStatusHandler core.AppStatusHandler - promStatusHandler = statusHandler.NewPrometheusStatusHandler() - - testServer := httptest.NewServer(promhttp.Handler()) - defer 
testServer.Close() - b.ResetTimer() - - _, err := http.Get(testServer.URL) - assert.Nil(b, err) - - for n := 0; n < b.N; n++ { - promStatusHandler.SetUInt64Value(core.MetricIsSyncing, uint64(10)) - } - promStatusHandler.Close() -} diff --git a/statusHandler/view/interface.go b/statusHandler/view/interface.go index 9fbd33d483a..a9c74faa772 100644 --- a/statusHandler/view/interface.go +++ b/statusHandler/view/interface.go @@ -40,6 +40,7 @@ type Presenter interface { GetLogLines() []string GetNumTxProcessed() uint64 GetCurrentBlockHash() string + GetEpochNumber() uint64 CalculateTimeToSynchronize() string CalculateSynchronizationSpeed() uint64 GetCurrentRoundTimestamp() uint64 diff --git a/statusHandler/view/termuic/termuiRenders/widgetsRender.go b/statusHandler/view/termuic/termuiRenders/widgetsRender.go index e10f3ced444..2ae27719b6c 100644 --- a/statusHandler/view/termuic/termuiRenders/widgetsRender.go +++ b/statusHandler/view/termuic/termuiRenders/widgetsRender.go @@ -222,27 +222,27 @@ func (wr *WidgetsRender) prepareChainInfo() { rows[3] = []string{fmt.Sprintf("Number of transactions processed: %d", numTxProcessed)} } + epoch := wr.presenter.GetEpochNumber() + rows[4] = []string{fmt.Sprintf("Current epoch: %d", epoch)} + nonce := wr.presenter.GetNonce() probableHighestNonce := wr.presenter.GetProbableHighestNonce() - rows[4] = []string{fmt.Sprintf("Current synchronized block nonce: %d / %d", + rows[5] = []string{fmt.Sprintf("Current synchronized block nonce: %d / %d", nonce, probableHighestNonce)} synchronizedRound := wr.presenter.GetSynchronizedRound() currentRound := wr.presenter.GetCurrentRound() - rows[5] = []string{fmt.Sprintf("Current consensus round: %d / %d", + rows[6] = []string{fmt.Sprintf("Current consensus round: %d / %d", synchronizedRound, currentRound)} consensusRoundTime := wr.presenter.GetRoundTime() - rows[6] = []string{fmt.Sprintf("Consensus round time: %ds", consensusRoundTime)} + rows[7] = []string{fmt.Sprintf("Consensus round time: %ds", consensusRoundTime)} + numConnectedPeers := wr.presenter.GetNumConnectedPeers() numLiveValidators := wr.presenter.GetLiveValidatorNodes() - rows[7] = []string{fmt.Sprintf("Live validator nodes: %d", numLiveValidators)} - numConnectedNodes := wr.presenter.GetConnectedNodes() - rows[8] = []string{fmt.Sprintf("Network connected nodes: %d", numConnectedNodes)} - - numConnectedPeers := wr.presenter.GetNumConnectedPeers() - rows[9] = []string{fmt.Sprintf("This node is connected to %d peers", numConnectedPeers)} + rows[8] = []string{fmt.Sprintf("Peers / Validators / Nodes: %d / %d / %d", + numConnectedPeers, numLiveValidators, numConnectedNodes)} wr.chainInfo.Title = "Chain info" wr.chainInfo.RowSeparator = false @@ -273,7 +273,7 @@ func (wr *WidgetsRender) prepareBlockInfo() { shardId := wr.presenter.GetShardId() if shardId != uint64(sharding.MetachainShardId) { highestFinalBlockInShard := wr.presenter.GetHighestFinalBlockInShard() - rows[4][0] += fmt.Sprintf(" ,final nonce: %d", highestFinalBlockInShard) + rows[4][0] += fmt.Sprintf(", final nonce: %d", highestFinalBlockInShard) } consensusState := wr.presenter.GetConsensusState() diff --git a/storage/badgerdb/badgerdb.go b/storage/badgerdb/badgerdb.go index 08d48f7a2a1..54aa51463ff 100644 --- a/storage/badgerdb/badgerdb.go +++ b/storage/badgerdb/badgerdb.go @@ -208,6 +208,11 @@ func (s *DB) Destroy() error { return err } +// DestroyClosed removes the already closed storage medium stored data +func (s *DB) DestroyClosed() error { + return os.RemoveAll(s.path) +} + // IsInterfaceNil 
returns true if there is no value under the interface func (s *DB) IsInterfaceNil() bool { if s == nil { diff --git a/storage/boltdb/boltdb.go b/storage/boltdb/boltdb.go index 7056636c32b..a06da8f4b93 100644 --- a/storage/boltdb/boltdb.go +++ b/storage/boltdb/boltdb.go @@ -140,6 +140,11 @@ func (s *DB) Destroy() error { return err } +// DestroyClosed removes the already closed storage medium stored data +func (s *DB) DestroyClosed() error { + return os.RemoveAll(s.path) +} + // IsInterfaceNil returns true if there is no value under the interface func (s *DB) IsInterfaceNil() bool { if s == nil { diff --git a/storage/errors.go b/storage/errors.go index 64c322fb07b..df5a356e1f0 100644 --- a/storage/errors.go +++ b/storage/errors.go @@ -39,3 +39,42 @@ var ErrDuplicateKeyToAdd = errors.New("the key can not be added as it already ex // ErrEmptyKey is raised when a key is empty var ErrEmptyKey = errors.New("key is empty") + +// ErrInvalidNumberOfPersisters signals that an invalid number of persisters has been provided +var ErrInvalidNumberOfPersisters = errors.New("invalid number of active persisters") + +// ErrNilEpochStartNotifier signals that a nil epoch start notifier has been provided +var ErrNilEpochStartNotifier = errors.New("nil epoch start notifier") + +// ErrNilPersisterFactory signals that a nil persister factory has been provided +var ErrNilPersisterFactory = errors.New("nil persister factory") + +// ErrDestroyingUnit signals that the destroy unit method did not manage to destroy all the persisters in a pruning storer +var ErrDestroyingUnit = errors.New("destroy unit didn't remove all the persisters") + +// ErrNilConfig signals that a nil configuration has been received +var ErrNilConfig = errors.New("nil config") + +// ErrNilShardCoordinator signals that a nil shard coordinator has been provided +var ErrNilShardCoordinator = errors.New("nil shard coordinator") + +// ErrNilPathManager signals that a nil path manager has been provided +var ErrNilPathManager = errors.New("nil path manager") + +// ErrEmptyPruningPathTemplate signals that an empty path template for pruning storers has been provided +var ErrEmptyPruningPathTemplate = errors.New("empty path template for pruning storers") + +// ErrEmptyStaticPathTemplate signals that an empty path template for static storers has been provided +var ErrEmptyStaticPathTemplate = errors.New("empty path template for static storers") + +// ErrInvalidPruningPathTemplate signals that an invalid path template for pruning storers has been provided +var ErrInvalidPruningPathTemplate = errors.New("invalid path template for pruning storers") + +// ErrInvalidStaticPathTemplate signals that an invalid path template for static storers has been provided +var ErrInvalidStaticPathTemplate = errors.New("invalid path template for static storers") + +// ErrInvalidNumberOfEpochsToSave signals that an invalid number of epochs to save has been provided +var ErrInvalidNumberOfEpochsToSave = errors.New("invalid number of epochs to save") + +// ErrInvalidNumberOfActivePersisters signals that an invalid number of active persisters has been provided +var ErrInvalidNumberOfActivePersisters = errors.New("invalid number of active persisters") diff --git a/storage/factory/common.go b/storage/factory/common.go new file mode 100644 index 00000000000..e68d00b407f --- /dev/null +++ b/storage/factory/common.go @@ -0,0 +1,43 @@ +package factory + +import ( + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" +) + +// 
GetCacherFromConfig will return the cache config needed for the storage unit, from a config that came from the toml file
+func GetCacherFromConfig(cfg config.CacheConfig) storageUnit.CacheConfig {
+	return storageUnit.CacheConfig{
+		Size:   cfg.Size,
+		Type:   storageUnit.CacheType(cfg.Type),
+		Shards: cfg.Shards,
+	}
+}
+
+// GetDBFromConfig will return the db config needed for the storage unit, from a config that came from the toml file
+func GetDBFromConfig(cfg config.DBConfig) storageUnit.DBConfig {
+	return storageUnit.DBConfig{
+		Type:              storageUnit.DBType(cfg.Type),
+		MaxBatchSize:      cfg.MaxBatchSize,
+		BatchDelaySeconds: cfg.BatchDelaySeconds,
+		MaxOpenFiles:      cfg.MaxOpenFiles,
+	}
+}
+
+// GetBloomFromConfig will return the bloom config needed for the storage unit, from a config that came from the toml file
+func GetBloomFromConfig(cfg config.BloomFilterConfig) storageUnit.BloomConfig {
+	var hashFuncs []storageUnit.HasherType
+	if cfg.HashFunc != nil {
+		hashFuncs = make([]storageUnit.HasherType, len(cfg.HashFunc))
+		idx := 0
+		for _, hf := range cfg.HashFunc {
+			hashFuncs[idx] = storageUnit.HasherType(hf)
+			idx++
+		}
+	}
+
+	return storageUnit.BloomConfig{
+		Size:     cfg.Size,
+		HashFunc: hashFuncs,
+	}
+}
diff --git a/storage/factory/persisterFactory.go b/storage/factory/persisterFactory.go
new file mode 100644
index 00000000000..32c9f4aeebb
--- /dev/null
+++ b/storage/factory/persisterFactory.go
@@ -0,0 +1,55 @@
+package factory
+
+import (
+	"errors"
+
+	"github.com/ElrondNetwork/elrond-go/config"
+	"github.com/ElrondNetwork/elrond-go/storage"
+	"github.com/ElrondNetwork/elrond-go/storage/badgerdb"
+	"github.com/ElrondNetwork/elrond-go/storage/boltdb"
+	"github.com/ElrondNetwork/elrond-go/storage/leveldb"
+	"github.com/ElrondNetwork/elrond-go/storage/storageUnit"
+)
+
+// PersisterFactory is the factory which will handle creating new databases
+type PersisterFactory struct {
+	dbType            string
+	batchDelaySeconds int
+	maxBatchSize      int
+	maxOpenFiles      int
+}
+
+// NewPersisterFactory will return a new instance of a PersisterFactory
+func NewPersisterFactory(config config.DBConfig) *PersisterFactory {
+	return &PersisterFactory{
+		dbType:            config.Type,
+		batchDelaySeconds: config.BatchDelaySeconds,
+		maxBatchSize:      config.MaxBatchSize,
+		maxOpenFiles:      config.MaxOpenFiles,
+	}
+}
+
+// Create will return a new instance of a DB with a given path
+func (pf *PersisterFactory) Create(path string) (storage.Persister, error) {
+	if len(path) == 0 {
+		return nil, errors.New("invalid file path")
+	}
+
+	// instantiate the persister matching the configured DB type
+	switch storageUnit.DBType(pf.dbType) {
+	case storageUnit.LvlDB:
+		return leveldb.NewDB(path, pf.batchDelaySeconds, pf.maxBatchSize, pf.maxOpenFiles)
+	case storageUnit.LvlDbSerial:
+		return leveldb.NewSerialDB(path, pf.batchDelaySeconds, pf.maxBatchSize, pf.maxOpenFiles)
+	case storageUnit.BadgerDB:
+		return badgerdb.NewDB(path, pf.batchDelaySeconds, pf.maxBatchSize)
+	case storageUnit.BoltDB:
+		return boltdb.NewDB(path, pf.batchDelaySeconds, pf.maxBatchSize)
+	default:
+		return nil, storage.ErrNotSupportedDBType
+	}
+}
+
+// IsInterfaceNil returns true if there is no value under the interface
+func (pf *PersisterFactory) IsInterfaceNil() bool {
+	return pf == nil
+}
diff --git a/storage/factory/pruningStorerFactory.go b/storage/factory/pruningStorerFactory.go
new file mode 100644
index 00000000000..18464265fef
--- /dev/null
+++ b/storage/factory/pruningStorerFactory.go
@@ -0,0 +1,385 @@
+package factory
+
+import (
+	"path/filepath"
+
+	"github.com/ElrondNetwork/elrond-go/config"
+	"github.com/ElrondNetwork/elrond-go/core"
+	
"github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/logger" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/pruning" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" +) + +var log = logger.GetOrCreate("storage/factory") + +const ( + minimumNumberOfActivePersisters = 1 + minimumNumberOfEpochsToKeep = 2 +) + +// StorageServiceFactory handles the creation of storage services for both meta and shards +type StorageServiceFactory struct { + generalConfig *config.Config + shardCoordinator sharding.Coordinator + pathManager storage.PathManagerHandler + epochStartNotifier storage.EpochStartNotifier + currentEpoch uint32 +} + +// NewStorageServiceFactory will return a new instance of StorageServiceFactory +func NewStorageServiceFactory( + config *config.Config, + shardCoordinator sharding.Coordinator, + pathManager storage.PathManagerHandler, + epochStartNotifier storage.EpochStartNotifier, + currentEpoch uint32, +) (*StorageServiceFactory, error) { + if config == nil { + return nil, storage.ErrNilConfig + } + if config.StoragePruning.NumEpochsToKeep < minimumNumberOfEpochsToKeep && !config.StoragePruning.FullArchive { + return nil, storage.ErrInvalidNumberOfEpochsToSave + } + if config.StoragePruning.NumActivePersisters < minimumNumberOfActivePersisters { + return nil, storage.ErrInvalidNumberOfActivePersisters + } + if check.IfNil(shardCoordinator) { + return nil, storage.ErrNilShardCoordinator + } + if check.IfNil(pathManager) { + return nil, storage.ErrNilPathManager + } + if check.IfNil(epochStartNotifier) { + return nil, storage.ErrNilEpochStartNotifier + } + + return &StorageServiceFactory{ + generalConfig: config, + shardCoordinator: shardCoordinator, + pathManager: pathManager, + epochStartNotifier: epochStartNotifier, + currentEpoch: currentEpoch, + }, nil +} + +// CreateForShard will return the storage service which contains all storers needed for a shard +func (psf *StorageServiceFactory) CreateForShard() (dataRetriever.StorageService, error) { + var headerUnit *pruning.PruningStorer + var peerBlockUnit *pruning.PruningStorer + var miniBlockUnit *pruning.PruningStorer + var txUnit *pruning.PruningStorer + var metachainHeaderUnit *pruning.PruningStorer + var unsignedTxUnit *pruning.PruningStorer + var rewardTxUnit *pruning.PruningStorer + var metaHdrHashNonceUnit *pruning.PruningStorer + var shardHdrHashNonceUnit *pruning.PruningStorer + var bootstrapUnit *pruning.PruningStorer + var err error + + successfullyCreatedStorers := make([]storage.Storer, 0) + defer func() { + // cleanup + if err != nil { + for _, storer := range successfullyCreatedStorers { + _ = storer.DestroyUnit() + } + } + }() + + txUnitStorerArgs := psf.createPruningStorerArgs(psf.generalConfig.TxStorage) + txUnit, err = pruning.NewPruningStorer(txUnitStorerArgs) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, txUnit) + + unsignedTxUnitStorerArgs := psf.createPruningStorerArgs(psf.generalConfig.UnsignedTransactionStorage) + unsignedTxUnit, err = pruning.NewPruningStorer(unsignedTxUnitStorerArgs) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, unsignedTxUnit) + + rewardTxUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.RewardTxStorage) + rewardTxUnit, err = pruning.NewPruningStorer(rewardTxUnitArgs) + if err != 
nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, rewardTxUnit) + + miniBlockUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.MiniBlocksStorage) + miniBlockUnit, err = pruning.NewPruningStorer(miniBlockUnitArgs) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, miniBlockUnit) + + peerBlockUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.PeerBlockBodyStorage) + peerBlockUnit, err = pruning.NewPruningStorer(peerBlockUnitArgs) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, peerBlockUnit) + + headerUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.BlockHeaderStorage) + headerUnit, err = pruning.NewPruningStorer(headerUnitArgs) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, headerUnit) + + metaChainHeaderUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.MetaBlockStorage) + metachainHeaderUnit, err = pruning.NewPruningStorer(metaChainHeaderUnitArgs) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, metachainHeaderUnit) + + metaHdrHashNonceUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.MetaHdrNonceHashStorage) + metaHdrHashNonceUnit, err = pruning.NewPruningStorer(metaHdrHashNonceUnitArgs) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, metaHdrHashNonceUnit) + + shardHdrHashNonceUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.ShardHdrNonceHashStorage) + shardHdrHashNonceUnit, err = pruning.NewShardedPruningStorer(shardHdrHashNonceUnitArgs, psf.shardCoordinator.SelfId()) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, shardHdrHashNonceUnit) + + heartbeatDbConfig := GetDBFromConfig(psf.generalConfig.Heartbeat.HeartbeatStorage.DB) + shardId := core.GetShardIdString(psf.shardCoordinator.SelfId()) + dbPath := psf.pathManager.PathForStatic(shardId, psf.generalConfig.Heartbeat.HeartbeatStorage.DB.FilePath) + heartbeatDbConfig.FilePath = dbPath + heartbeatStorageUnit, err := storageUnit.NewStorageUnitFromConf( + GetCacherFromConfig(psf.generalConfig.Heartbeat.HeartbeatStorage.Cache), + heartbeatDbConfig, + GetBloomFromConfig(psf.generalConfig.Heartbeat.HeartbeatStorage.Bloom)) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, heartbeatStorageUnit) + + statusMetricsDbConfig := GetDBFromConfig(psf.generalConfig.StatusMetricsStorage.DB) + shardId = core.GetShardIdString(psf.shardCoordinator.SelfId()) + dbPath = psf.pathManager.PathForStatic(shardId, psf.generalConfig.StatusMetricsStorage.DB.FilePath) + statusMetricsDbConfig.FilePath = dbPath + statusMetricsStorageUnit, err := storageUnit.NewStorageUnitFromConf( + GetCacherFromConfig(psf.generalConfig.StatusMetricsStorage.Cache), + statusMetricsDbConfig, + GetBloomFromConfig(psf.generalConfig.StatusMetricsStorage.Bloom)) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, statusMetricsStorageUnit) + + bootstrapUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.BootstrapStorage) + bootstrapUnit, err = pruning.NewPruningStorer(bootstrapUnitArgs) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, bootstrapUnit) + + store := dataRetriever.NewChainStorer() + 
store.AddStorer(dataRetriever.TransactionUnit, txUnit) + store.AddStorer(dataRetriever.MiniBlockUnit, miniBlockUnit) + store.AddStorer(dataRetriever.PeerChangesUnit, peerBlockUnit) + store.AddStorer(dataRetriever.BlockHeaderUnit, headerUnit) + store.AddStorer(dataRetriever.MetaBlockUnit, metachainHeaderUnit) + store.AddStorer(dataRetriever.UnsignedTransactionUnit, unsignedTxUnit) + store.AddStorer(dataRetriever.RewardTransactionUnit, rewardTxUnit) + store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, metaHdrHashNonceUnit) + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(psf.shardCoordinator.SelfId()) + store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnit) + store.AddStorer(dataRetriever.HeartbeatUnit, heartbeatStorageUnit) + store.AddStorer(dataRetriever.BootstrapUnit, bootstrapUnit) + store.AddStorer(dataRetriever.StatusMetricsUnit, statusMetricsStorageUnit) + + return store, err +} + +// CreateForMeta will return the storage service which contains all storers needed for metachain +func (psf *StorageServiceFactory) CreateForMeta() (dataRetriever.StorageService, error) { + var shardDataUnit *pruning.PruningStorer + var metaBlockUnit *pruning.PruningStorer + var headerUnit *pruning.PruningStorer + var txUnit *pruning.PruningStorer + var peerDataUnit *pruning.PruningStorer + var metaHdrHashNonceUnit *pruning.PruningStorer + var miniBlockUnit *pruning.PruningStorer + var unsignedTxUnit *pruning.PruningStorer + var miniBlockHeadersUnit *pruning.PruningStorer + var shardHdrHashNonceUnits []*pruning.PruningStorer + var bootstrapUnit *pruning.PruningStorer + var err error + + successfullyCreatedStorers := make([]storage.Storer, 0) + + defer func() { + // cleanup + if err != nil { + log.Error("create meta store", "error", err.Error()) + for _, storer := range successfullyCreatedStorers { + _ = storer.DestroyUnit() + } + } + }() + + metaBlockUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.MetaBlockStorage) + metaBlockUnit, err = pruning.NewPruningStorer(metaBlockUnitArgs) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, metaBlockUnit) + + shardDataUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.ShardDataStorage) + shardDataUnit, err = pruning.NewPruningStorer(shardDataUnitArgs) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, shardDataUnit) + + peerDataUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.PeerDataStorage) + peerDataUnit, err = pruning.NewPruningStorer(peerDataUnitArgs) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, peerDataUnit) + + headerUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.BlockHeaderStorage) + headerUnit, err = pruning.NewPruningStorer(headerUnitArgs) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, headerUnit) + + metaHdrHashNonceUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.MetaHdrNonceHashStorage) + metaHdrHashNonceUnit, err = pruning.NewPruningStorer(metaHdrHashNonceUnitArgs) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, metaHdrHashNonceUnit) + + shardHdrHashNonceUnits = make([]*pruning.PruningStorer, psf.shardCoordinator.NumberOfShards()) + for i := uint32(0); i < psf.shardCoordinator.NumberOfShards(); i++ { + shardHdrHashNonceUnitArgs := 
psf.createPruningStorerArgs(psf.generalConfig.ShardHdrNonceHashStorage) + shardHdrHashNonceUnits[i], err = pruning.NewShardedPruningStorer(shardHdrHashNonceUnitArgs, i) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, shardHdrHashNonceUnits[i]) + } + + shardId := core.GetShardIdString(psf.shardCoordinator.SelfId()) + heartbeatDbConfig := GetDBFromConfig(psf.generalConfig.Heartbeat.HeartbeatStorage.DB) + dbPath := psf.pathManager.PathForStatic(shardId, psf.generalConfig.Heartbeat.HeartbeatStorage.DB.FilePath) + heartbeatDbConfig.FilePath = dbPath + heartbeatStorageUnit, err := storageUnit.NewStorageUnitFromConf( + GetCacherFromConfig(psf.generalConfig.Heartbeat.HeartbeatStorage.Cache), + heartbeatDbConfig, + GetBloomFromConfig(psf.generalConfig.Heartbeat.HeartbeatStorage.Bloom)) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, heartbeatStorageUnit) + + statusMetricsDbConfig := GetDBFromConfig(psf.generalConfig.StatusMetricsStorage.DB) + shardId = core.GetShardIdString(psf.shardCoordinator.SelfId()) + dbPath = psf.pathManager.PathForStatic(shardId, psf.generalConfig.StatusMetricsStorage.DB.FilePath) + statusMetricsDbConfig.FilePath = dbPath + statusMetricsStorageUnit, err := storageUnit.NewStorageUnitFromConf( + GetCacherFromConfig(psf.generalConfig.StatusMetricsStorage.Cache), + statusMetricsDbConfig, + GetBloomFromConfig(psf.generalConfig.StatusMetricsStorage.Bloom)) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, statusMetricsStorageUnit) + + txUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.TxStorage) + txUnit, err = pruning.NewPruningStorer(txUnitArgs) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, txUnit) + + unsignedTxUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.UnsignedTransactionStorage) + unsignedTxUnit, err = pruning.NewPruningStorer(unsignedTxUnitArgs) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, unsignedTxUnit) + + miniBlockUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.MiniBlocksStorage) + miniBlockUnit, err = pruning.NewPruningStorer(miniBlockUnitArgs) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, miniBlockUnit) + + miniBlockHeadersUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.MiniBlockHeadersStorage) + miniBlockHeadersUnit, err = pruning.NewPruningStorer(miniBlockHeadersUnitArgs) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, miniBlockHeadersUnit) + + bootstrapUnitArgs := psf.createPruningStorerArgs(psf.generalConfig.BootstrapStorage) + bootstrapUnit, err = pruning.NewPruningStorer(bootstrapUnitArgs) + if err != nil { + return nil, err + } + successfullyCreatedStorers = append(successfullyCreatedStorers, bootstrapUnit) + + store := dataRetriever.NewChainStorer() + store.AddStorer(dataRetriever.MetaBlockUnit, metaBlockUnit) + store.AddStorer(dataRetriever.MetaShardDataUnit, shardDataUnit) + store.AddStorer(dataRetriever.MetaPeerDataUnit, peerDataUnit) + store.AddStorer(dataRetriever.BlockHeaderUnit, headerUnit) + store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, metaHdrHashNonceUnit) + store.AddStorer(dataRetriever.TransactionUnit, txUnit) + store.AddStorer(dataRetriever.UnsignedTransactionUnit, unsignedTxUnit) + 
store.AddStorer(dataRetriever.MiniBlockUnit, miniBlockUnit) + store.AddStorer(dataRetriever.MiniBlockHeaderUnit, miniBlockHeadersUnit) + for i := uint32(0); i < psf.shardCoordinator.NumberOfShards(); i++ { + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) + store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnits[i]) + } + store.AddStorer(dataRetriever.HeartbeatUnit, heartbeatStorageUnit) + store.AddStorer(dataRetriever.BootstrapUnit, bootstrapUnit) + store.AddStorer(dataRetriever.StatusMetricsUnit, statusMetricsStorageUnit) + + return store, err +} + +func (psf *StorageServiceFactory) createPruningStorerArgs(storageConfig config.StorageConfig) *pruning.StorerArgs { + fullArchiveMode := psf.generalConfig.StoragePruning.FullArchive + numOfEpochsToKeep := uint32(psf.generalConfig.StoragePruning.NumEpochsToKeep) + numOfActivePersisters := uint32(psf.generalConfig.StoragePruning.NumActivePersisters) + pruningEnabled := psf.generalConfig.StoragePruning.Enabled + shardId := core.GetShardIdString(psf.shardCoordinator.SelfId()) + dbPath := filepath.Join(psf.pathManager.PathForEpoch(shardId, 0, storageConfig.DB.FilePath)) + args := &pruning.StorerArgs{ + Identifier: storageConfig.DB.FilePath, + PruningEnabled: pruningEnabled, + StartingEpoch: psf.currentEpoch, + FullArchive: fullArchiveMode, + ShardCoordinator: psf.shardCoordinator, + CacheConf: GetCacherFromConfig(storageConfig.Cache), + PathManager: psf.pathManager, + DbPath: dbPath, + PersisterFactory: NewPersisterFactory(storageConfig.DB), + BloomFilterConf: GetBloomFromConfig(storageConfig.Bloom), + NumOfEpochsToKeep: numOfEpochsToKeep, + NumOfActivePersisters: numOfActivePersisters, + Notifier: psf.epochStartNotifier, + } + + return args +} diff --git a/storage/interface.go b/storage/interface.go index 9ea9b7d48de..133f8f67da5 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -1,5 +1,10 @@ package storage +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/epochStart/notifier" +) + // Persister provides storage of data services in a database like construct type Persister interface { // Put add the value to the (key, val) persistence medium @@ -16,6 +21,8 @@ type Persister interface { Remove(key []byte) error // Destroy removes the persistence medium stored data Destroy() error + // DestroyClosed removes the already closed persistence medium stored data + DestroyClosed() error // IsInterfaceNil returns true if there is no value under the interface IsInterfaceNil() bool } @@ -47,7 +54,7 @@ type Cacher interface { // the "recently used"-ness of the key. Peek(key []byte) (value interface{}, ok bool) // HasOrAdd checks if a key is in the cache without updating the - // recent-ness or deleting it for being stale, and if not, adds the value. + // recent-ness or deleting it for being stale, and if not adds the value. // Returns whether found and whether an eviction occurred. HasOrAdd(key []byte, value interface{}) (ok, evicted bool) // Remove removes the provided key from the cache. 
@@ -90,3 +97,18 @@ type Storer interface { DestroyUnit() error IsInterfaceNil() bool } + +// EpochStartNotifier defines which actions should be done for handling new epoch's events +type EpochStartNotifier interface { + RegisterHandler(handler notifier.SubscribeFunctionHandler) + UnregisterHandler(handler notifier.SubscribeFunctionHandler) + NotifyAll(hdr data.HeaderHandler) + IsInterfaceNil() bool +} + +// PathManagerHandler defines which actions should be done for generating paths for databases directories +type PathManagerHandler interface { + PathForEpoch(shardId string, epoch uint32, identifier string) string + PathForStatic(shardId string, identifier string) string + IsInterfaceNil() bool +} diff --git a/storage/leveldb/leveldb.go b/storage/leveldb/leveldb.go index 33a349f002c..3e9671bc597 100644 --- a/storage/leveldb/leveldb.go +++ b/storage/leveldb/leveldb.go @@ -206,6 +206,11 @@ func (s *DB) Destroy() error { return err } +// DestroyClosed removes the already closed storage medium stored data +func (s *DB) DestroyClosed() error { + return os.RemoveAll(s.path) +} + // IsInterfaceNil returns true if there is no value under the interface func (s *DB) IsInterfaceNil() bool { if s == nil { diff --git a/storage/leveldb/leveldbSerial.go b/storage/leveldb/leveldbSerial.go index d5ad49bf896..be66bf42682 100644 --- a/storage/leveldb/leveldbSerial.go +++ b/storage/leveldb/leveldbSerial.go @@ -205,6 +205,7 @@ func (s *SerialDB) Close() error { s.mutClosed.Unlock() _ = s.putBatch() + s.cancel() return s.db.Close() @@ -256,6 +257,15 @@ func (s *SerialDB) Destroy() error { return err } +// DestroyClosed removes the already closed storage medium stored data +func (s *SerialDB) DestroyClosed() error { + err := os.RemoveAll(s.path) + if err != nil { + log.Error("error destroy closed", "error", err, "path", s.path) + } + return err +} + func (s *SerialDB) processLoop(ctx context.Context) { for { select { diff --git a/storage/memorydb/lruMemoryDB.go b/storage/memorydb/lruMemoryDB.go index 5e9ded9d44c..019f3a00539 100644 --- a/storage/memorydb/lruMemoryDB.go +++ b/storage/memorydb/lruMemoryDB.go @@ -74,6 +74,11 @@ func (l *lruDB) Destroy() error { return nil } +// DestroyClosed removes the already closed storage medium stored data +func (l *lruDB) DestroyClosed() error { + return l.Destroy() +} + // IsInterfaceNil returns true if there is no value under the interface func (l *lruDB) IsInterfaceNil() bool { if l == nil { diff --git a/storage/memorydb/memorydb.go b/storage/memorydb/memorydb.go index cd5469e8646..8792a4920f1 100644 --- a/storage/memorydb/memorydb.go +++ b/storage/memorydb/memorydb.go @@ -15,11 +15,11 @@ type DB struct { } // New creates a new memorydb object -func New() (*DB, error) { +func New() *DB { return &DB{ db: make(map[string][]byte), mutx: sync.RWMutex{}, - }, nil + } } // Put adds the value to the (key, val) storage medium @@ -91,6 +91,11 @@ func (s *DB) Destroy() error { return nil } +// DestroyClosed removes the storage medium stored data +func (s *DB) DestroyClosed() error { + return s.Destroy() +} + // IsInterfaceNil returns true if there is no value under the interface func (s *DB) IsInterfaceNil() bool { if s == nil { diff --git a/storage/memorydb/memorydb_test.go b/storage/memorydb/memorydb_test.go index 9933f830552..cbdeafb0db8 100644 --- a/storage/memorydb/memorydb_test.go +++ b/storage/memorydb/memorydb_test.go @@ -8,126 +8,93 @@ import ( ) func TestInitNoError(t *testing.T) { - mdb, err := memorydb.New() + mdb := memorydb.New() - assert.Nil(t, err, "failed to create 
memorydb: %s", err) - - err = mdb.Init() + err := mdb.Init() assert.Nil(t, err, "error initializing db") } func TestPutNoError(t *testing.T) { key, val := []byte("key"), []byte("value") - mdb, err := memorydb.New() - - assert.Nil(t, err, "failed to create memorydb: %s", err) - - err = mdb.Put(key, val) + mdb := memorydb.New() + err := mdb.Put(key, val) assert.Nil(t, err, "error saving in db") } func TestGetPresent(t *testing.T) { key, val := []byte("key1"), []byte("value1") - mdb, err := memorydb.New() - - assert.Nil(t, err, "failed to create memorydb: %s", err) - - err = mdb.Put(key, val) + mdb := memorydb.New() + err := mdb.Put(key, val) assert.Nil(t, err, "error saving in db") v, err := mdb.Get(key) - assert.Nil(t, err, "error not expected but got %s", err) assert.Equal(t, val, v, "expected %s but got %s", val, v) } func TestGetNotPresent(t *testing.T) { key := []byte("key2") - mdb, err := memorydb.New() - - assert.Nil(t, err, "failed to create memorydb: %s", err) + mdb := memorydb.New() v, err := mdb.Get(key) - assert.NotNil(t, err, "error expected but got nil, value %s", v) } func TestHasPresent(t *testing.T) { key, val := []byte("key3"), []byte("value3") - mdb, err := memorydb.New() - - assert.Nil(t, err, "failed to create memorydb: %s", err) - - err = mdb.Put(key, val) + mdb := memorydb.New() + err := mdb.Put(key, val) assert.Nil(t, err, "error saving in db") err = mdb.Has(key) - assert.Nil(t, err, "error not expected but got %s", err) } func TestHasNotPresent(t *testing.T) { key := []byte("key4") - mdb, err := memorydb.New() - - assert.Nil(t, err, "failed to create memorydb: %s", err) - - err = mdb.Has(key) + mdb := memorydb.New() + err := mdb.Has(key) assert.NotNil(t, err) assert.Contains(t, err.Error(), "key not found") } func TestDeletePresent(t *testing.T) { key, val := []byte("key5"), []byte("value5") - mdb, err := memorydb.New() - - assert.Nil(t, err, "failed to create memorydb: %s", err) - - err = mdb.Put(key, val) + mdb := memorydb.New() + err := mdb.Put(key, val) assert.Nil(t, err, "error saving in db") err = mdb.Remove(key) - assert.Nil(t, err, "no error expected but got %s", err) err = mdb.Has(key) - assert.NotNil(t, err, "element not expected as already deleted") assert.Contains(t, err.Error(), "key not found") } func TestDeleteNotPresent(t *testing.T) { key := []byte("key6") - mdb, err := memorydb.New() - - assert.Nil(t, err, "failed to create memorydb: %s", err) - - err = mdb.Remove(key) + mdb := memorydb.New() + err := mdb.Remove(key) assert.Nil(t, err, "no error expected but got %s", err) } func TestClose(t *testing.T) { - mdb, err := memorydb.New() - - assert.Nil(t, err, "failed to create memorydb: %s", err) - - err = mdb.Close() + mdb := memorydb.New() + err := mdb.Close() assert.Nil(t, err, "no error expected but got %s", err) } func TestDestroy(t *testing.T) { - mdb, err := memorydb.New() - - assert.Nil(t, err, "failed to create memorydb: %s", err) - - err = mdb.Destroy() + mdb := memorydb.New() + err := mdb.Destroy() assert.Nil(t, err, "no error expected but got %s", err) } diff --git a/storage/mock/epochStartHandler.go b/storage/mock/epochStartHandler.go new file mode 100644 index 00000000000..ebc7cd23239 --- /dev/null +++ b/storage/mock/epochStartHandler.go @@ -0,0 +1,34 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/epochStart/notifier" +) + +type EpochStartNotifierStub struct { + RegisterHandlerCalled func(handler notifier.SubscribeFunctionHandler) + UnregisterHandlerCalled func(handler 
notifier.SubscribeFunctionHandler) + NotifyAllCalled func(hdr data.HeaderHandler) +} + +func (esnm *EpochStartNotifierStub) RegisterHandler(handler notifier.SubscribeFunctionHandler) { + if esnm.RegisterHandlerCalled != nil { + esnm.RegisterHandlerCalled(handler) + } +} + +func (esnm *EpochStartNotifierStub) UnregisterHandler(handler notifier.SubscribeFunctionHandler) { + if esnm.UnregisterHandlerCalled != nil { + esnm.UnregisterHandlerCalled(handler) + } +} + +func (esnm *EpochStartNotifierStub) NotifyAll(hdr data.HeaderHandler) { + if esnm.NotifyAllCalled != nil { + esnm.NotifyAllCalled(hdr) + } +} + +func (esnm *EpochStartNotifierStub) IsInterfaceNil() bool { + return esnm == nil +} diff --git a/storage/mock/pathManagerStub.go b/storage/mock/pathManagerStub.go new file mode 100644 index 00000000000..e65b398a0a8 --- /dev/null +++ b/storage/mock/pathManagerStub.go @@ -0,0 +1,28 @@ +package mock + +import "fmt" + +type PathManagerStub struct { + PathForEpochCalled func(shardId string, epoch uint32, identifier string) string + PathForStaticCalled func(shardId string, identifier string) string +} + +func (p *PathManagerStub) PathForEpoch(shardId string, epoch uint32, identifier string) string { + if p.PathForEpochCalled != nil { + return p.PathForEpochCalled(shardId, epoch, identifier) + } + + return fmt.Sprintf("Epoch_%d/Shard_%s/%s", epoch, shardId, identifier) +} + +func (p *PathManagerStub) PathForStatic(shardId string, identifier string) string { + if p.PathForEpochCalled != nil { + return p.PathForStaticCalled(shardId, identifier) + } + + return fmt.Sprintf("Static/Shard_%s/%s", shardId, identifier) +} + +func (p *PathManagerStub) IsInterfaceNil() bool { + return p == nil +} diff --git a/storage/mock/persisterFactoryStub.go b/storage/mock/persisterFactoryStub.go new file mode 100644 index 00000000000..31c8827926b --- /dev/null +++ b/storage/mock/persisterFactoryStub.go @@ -0,0 +1,23 @@ +package mock + +import ( + "errors" + + "github.com/ElrondNetwork/elrond-go/storage" +) + +type PersisterFactoryStub struct { + CreateCalled func(path string) (storage.Persister, error) +} + +func (pfs *PersisterFactoryStub) Create(path string) (storage.Persister, error) { + if pfs.CreateCalled != nil { + return pfs.CreateCalled(path) + } + + return nil, errors.New("not implemented") +} + +func (pfs *PersisterFactoryStub) IsInterfaceNil() bool { + return pfs == nil +} diff --git a/storage/mock/shardCoordinatorMock.go b/storage/mock/shardCoordinatorMock.go new file mode 100644 index 00000000000..2f59224edd8 --- /dev/null +++ b/storage/mock/shardCoordinatorMock.go @@ -0,0 +1,52 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type ShardCoordinatorMock struct { + SelfShardId uint32 + NumShards uint32 +} + +func NewShardCoordinatorMock(selfShardID uint32, numShards uint32) *ShardCoordinatorMock { + return &ShardCoordinatorMock{ + SelfShardId: selfShardID, + NumShards: numShards, + } +} + +func (scm *ShardCoordinatorMock) NumberOfShards() uint32 { + return scm.NumShards +} + +func (scm *ShardCoordinatorMock) ComputeId(address state.AddressContainer) uint32 { + return 0 +} + +func (scm *ShardCoordinatorMock) SetSelfShardId(shardId uint32) error { + scm.SelfShardId = shardId + return nil +} + +func (scm *ShardCoordinatorMock) SelfId() uint32 { + return scm.SelfShardId +} + +func (scm *ShardCoordinatorMock) SameShard(firstAddress, secondAddress state.AddressContainer) bool { + return true +} + +func (scm *ShardCoordinatorMock) 
CommunicationIdentifier(destShardID uint32) string { + if destShardID == sharding.MetachainShardId { + return "_0_META" + } + + return "_0" +} + +// IsInterfaceNil returns true if there is no value under the interface +func (scm *ShardCoordinatorMock) IsInterfaceNil() bool { + return scm == nil +} diff --git a/storage/pathmanager/pathManager.go b/storage/pathmanager/pathManager.go new file mode 100644 index 00000000000..92c2bb4fce1 --- /dev/null +++ b/storage/pathmanager/pathManager.go @@ -0,0 +1,64 @@ +package pathmanager + +import ( + "fmt" + "strings" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/storage" +) + +// PathManager will handle creation of paths for storers +type PathManager struct { + pruningPathTemplate string + staticPathTemplate string +} + +// NewPathManager will return a new instance of PathManager if the provided arguments are fine +func NewPathManager(pruningPathTemplate string, staticPathTemplate string) (*PathManager, error) { + if len(pruningPathTemplate) == 0 { + return nil, storage.ErrEmptyPruningPathTemplate + } + if !strings.Contains(pruningPathTemplate, core.PathEpochPlaceholder) || + !strings.Contains(pruningPathTemplate, core.PathShardPlaceholder) || + !strings.Contains(pruningPathTemplate, core.PathIdentifierPlaceholder) { + return nil, storage.ErrInvalidPruningPathTemplate + } + + if len(staticPathTemplate) == 0 { + return nil, storage.ErrEmptyStaticPathTemplate + } + if !strings.Contains(staticPathTemplate, core.PathShardPlaceholder) || + !strings.Contains(staticPathTemplate, core.PathIdentifierPlaceholder) { + return nil, storage.ErrInvalidStaticPathTemplate + } + + return &PathManager{ + pruningPathTemplate: pruningPathTemplate, + staticPathTemplate: staticPathTemplate, + }, nil +} + +// PathForEpoch will return the new path for a pruning storer +func (pm *PathManager) PathForEpoch(shardId string, epoch uint32, identifier string) string { + path := pm.pruningPathTemplate + path = strings.Replace(path, core.PathEpochPlaceholder, fmt.Sprintf("%d", epoch), 1) + path = strings.Replace(path, core.PathShardPlaceholder, shardId, 1) + path = strings.Replace(path, core.PathIdentifierPlaceholder, identifier, 1) + + return path +} + +// PathForStatic will return the path for a static storer +func (pm *PathManager) PathForStatic(shardId string, identifier string) string { + path := pm.staticPathTemplate + path = strings.Replace(path, core.PathShardPlaceholder, shardId, 1) + path = strings.Replace(path, core.PathIdentifierPlaceholder, identifier, 1) + + return path +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pm *PathManager) IsInterfaceNil() bool { + return pm == nil +} diff --git a/storage/pathmanager/pathManager_test.go b/storage/pathmanager/pathManager_test.go new file mode 100644 index 00000000000..18d98c8598c --- /dev/null +++ b/storage/pathmanager/pathManager_test.go @@ -0,0 +1,152 @@ +package pathmanager_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/pathmanager" + "github.com/stretchr/testify/assert" +) + +func TestNewPathManager_EmptyPruningPathTemplateShouldErr(t *testing.T) { + t.Parallel() + + pm, err := pathmanager.NewPathManager("", "shard_[S]/[I]") + assert.Nil(t, pm) + assert.Equal(t, storage.ErrEmptyPruningPathTemplate, err) +} + +func TestNewPathManager_EmptyStaticPathTemplateShouldErr(t *testing.T) { + t.Parallel() + + pm, err := pathmanager.NewPathManager("epoch_[E]/shard_[S]/[I]", "") + 
assert.Nil(t, pm) + assert.Equal(t, storage.ErrEmptyStaticPathTemplate, err) +} + +func TestNewPathManager_InvalidPruningPathTemplate_NoShardPlaceholder_ShouldErr(t *testing.T) { + t.Parallel() + + pm, err := pathmanager.NewPathManager("epoch_[E]/shard/[I]", "shard_[S]/[I]") + assert.Nil(t, pm) + assert.Equal(t, storage.ErrInvalidPruningPathTemplate, err) +} + +func TestNewPathManager_InvalidPruningPathTemplate_NoEpochPlaceholder_ShouldErr(t *testing.T) { + t.Parallel() + + pm, err := pathmanager.NewPathManager("epoch/shard_[S]/[I]", "shard_[S]/[I]") + assert.Nil(t, pm) + assert.Equal(t, storage.ErrInvalidPruningPathTemplate, err) +} + +func TestNewPathManager_InvalidPathPruningTemplate_NoIdentifierPlaceholder_ShouldErr(t *testing.T) { + t.Parallel() + + pm, err := pathmanager.NewPathManager("epoch_[E]/shard_[S]", "shard_[S]/[I]") + assert.Nil(t, pm) + assert.Equal(t, storage.ErrInvalidPruningPathTemplate, err) +} + +func TestNewPathManager_InvalidStaticPathTemplate_NoShardPlaceholder_ShouldErr(t *testing.T) { + t.Parallel() + + pm, err := pathmanager.NewPathManager("epoch_[E]/shard_[S]/[I]", "shard/[I]") + assert.Nil(t, pm) + assert.Equal(t, storage.ErrInvalidStaticPathTemplate, err) +} + +func TestNewPathManager_InvalidStaticPathTemplate_NoIdentifierPlaceholder_ShouldErr(t *testing.T) { + t.Parallel() + + pm, err := pathmanager.NewPathManager("epoch_[E]/shard_[S]/[I]", "shard_[S]") + assert.Nil(t, pm) + assert.Equal(t, storage.ErrInvalidStaticPathTemplate, err) +} + +func TestNewPathManager_OkValsShouldWork(t *testing.T) { + t.Parallel() + + pm, err := pathmanager.NewPathManager("epoch_[E]/shard_[S]/[I]", "shard_[S]/[I]") + assert.NotNil(t, pm) + assert.Nil(t, err) +} + +func TestPathManager_PathForEpoch(t *testing.T) { + t.Parallel() + + type args struct { + shardId string + epoch uint32 + identifier string + } + tests := []struct { + name string + args args + want string + }{ + { + args: args{shardId: "0", epoch: 2, identifier: "table"}, + want: "Epoch_2/Shard_0/table", + }, + { + args: args{shardId: "metachain", epoch: 2654, identifier: "table23"}, + want: "Epoch_2654/Shard_metachain/table23", + }, + { + args: args{shardId: "0", epoch: 0, identifier: ""}, + want: "Epoch_0/Shard_0/", + }, + { + args: args{shardId: "53", epoch: 25839, identifier: "table1"}, + want: "Epoch_25839/Shard_53/table1", + }, + } + pruningPathTemplate := "Epoch_[E]/Shard_[S]/[I]" + staticPathTemplate := "Shard_[S]/[I]" + pm, _ := pathmanager.NewPathManager(pruningPathTemplate, staticPathTemplate) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := pm.PathForEpoch(tt.args.shardId, tt.args.epoch, tt.args.identifier); got != tt.want { + t.Errorf("PathForEpoch() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestPathManager_PathForStatic(t *testing.T) { + t.Parallel() + + type args struct { + shardId string + identifier string + } + tests := []struct { + name string + args args + want string + }{ + { + args: args{shardId: "0", identifier: "table"}, + want: "Static/Shard_0/table", + }, + { + args: args{shardId: "metachain", identifier: "table23"}, + want: "Static/Shard_metachain/table23", + }, + { + args: args{shardId: "0", identifier: ""}, + want: "Static/Shard_0/", + }, + } + pruningPathTemplate := "Epoch_[E]/Shard_[S]/[I]" + staticPathTemplate := "Static/Shard_[S]/[I]" + pm, _ := pathmanager.NewPathManager(pruningPathTemplate, staticPathTemplate) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := pm.PathForStatic(tt.args.shardId, tt.args.identifier); 
got != tt.want { + t.Errorf("PathForEpoch() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/storage/pruning/export_test.go b/storage/pruning/export_test.go new file mode 100644 index 00000000000..83afd21f7e4 --- /dev/null +++ b/storage/pruning/export_test.go @@ -0,0 +1,9 @@ +package pruning + +func (ps *PruningStorer) ChangeEpoch(epochNum uint32) error { + return ps.changeEpoch(epochNum) +} + +func RemoveDirectoryIfEmpty(path string) { + removeDirectoryIfEmpty(path) +} diff --git a/storage/pruning/fileHandling.go b/storage/pruning/fileHandling.go new file mode 100644 index 00000000000..2f70d2f3dfd --- /dev/null +++ b/storage/pruning/fileHandling.go @@ -0,0 +1,54 @@ +package pruning + +import ( + "io" + "os" + "path/filepath" + "strings" +) + +// removeDirectoryIfEmpty will clean the directories after all persisters for one epoch were destroyed +// the structure is this way : +// workspace/db/Epoch_X/Shard_Y/DbName +// we need to remove the last 3 directories if everything is empty +func removeDirectoryIfEmpty(path string) { + elementsSplitBySeparator := strings.Split(path, string(os.PathSeparator)) + + epochDirectory := "" + for idx := 0; idx < len(elementsSplitBySeparator)-2; idx++ { + epochDirectory += elementsSplitBySeparator[idx] + string(os.PathSeparator) + } + + if len(elementsSplitBySeparator) > 2 { // if length is less than 2, the path is incorrect + shardDirectory := epochDirectory + elementsSplitBySeparator[len(elementsSplitBySeparator)-2] + if isDirectoryEmpty(shardDirectory) { + err := os.RemoveAll(shardDirectory) + if err != nil { + log.Debug("delete old db directory", "error", err.Error()) + } + + if isDirectoryEmpty(epochDirectory) { + err = os.RemoveAll(epochDirectory) + if err != nil { + log.Debug("delete old db directory", "error", err.Error()) + } + } + } + } +} + +func isDirectoryEmpty(name string) bool { + f, err := os.Open(filepath.Clean(name)) + if err != nil { + return false + } + defer func() { + err = f.Close() + if err != nil { + log.Debug("pruning db - file close", "error", err.Error()) + } + }() + + _, err = f.Readdirnames(1) // Or f.Readdir(1) + return err == io.EOF +} diff --git a/storage/pruning/interface.go b/storage/pruning/interface.go new file mode 100644 index 00000000000..3b11f19a248 --- /dev/null +++ b/storage/pruning/interface.go @@ -0,0 +1,19 @@ +package pruning + +import ( + "github.com/ElrondNetwork/elrond-go/epochStart/notifier" + "github.com/ElrondNetwork/elrond-go/storage" +) + +// EpochStartNotifier defines +type EpochStartNotifier interface { + RegisterHandler(handler notifier.SubscribeFunctionHandler) + UnregisterHandler(handler notifier.SubscribeFunctionHandler) + IsInterfaceNil() bool +} + +// DbFactoryHandler defines what a db factory implementation should do +type DbFactoryHandler interface { + Create(filePath string) (storage.Persister, error) + IsInterfaceNil() bool +} diff --git a/storage/pruning/pruningStorer.go b/storage/pruning/pruningStorer.go new file mode 100644 index 00000000000..258c51c65fd --- /dev/null +++ b/storage/pruning/pruningStorer.go @@ -0,0 +1,489 @@ +package pruning + +import ( + "encoding/base64" + "errors" + "fmt" + "sync" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/epochStart/notifier" + "github.com/ElrondNetwork/elrond-go/logger" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + 
"github.com/ElrondNetwork/elrond-go/storage/storageUnit" +) + +var log = logger.GetOrCreate("storage/pruning") + +// persisterData structure is used so the persister and its path can be kept in the same place +type persisterData struct { + persister storage.Persister + path string + isClosed bool +} + +// PruningStorer represents a storer which creates a new persister for each epoch and removes older activePersisters +type PruningStorer struct { + pruningEnabled bool + lock sync.RWMutex + shardCoordinator sharding.Coordinator + fullArchive bool + activePersisters []*persisterData + persistersMapByEpoch map[uint32]*persisterData + cacher storage.Cacher + bloomFilter storage.BloomFilter + pathManager storage.PathManagerHandler + dbPath string + persisterFactory DbFactoryHandler + numOfEpochsToKeep uint32 + numOfActivePersisters uint32 + identifier string +} + +// NewPruningStorer will return a new instance of PruningStorer without sharded directories' naming scheme +func NewPruningStorer(args *StorerArgs) (*PruningStorer, error) { + return initPruningStorer(args, "") +} + +// NewShardedPruningStorer will return a new instance of PruningStorer with sharded directories' naming scheme +func NewShardedPruningStorer( + args *StorerArgs, + shardId uint32, +) (*PruningStorer, error) { + shardIdStr := fmt.Sprintf("%d", shardId) + return initPruningStorer(args, shardIdStr) +} + +// initPruningStorer will create a PruningStorer with or without sharded directories' naming scheme +func initPruningStorer( + args *StorerArgs, + shardIdStr string, +) (*PruningStorer, error) { + var cache storage.Cacher + var db storage.Persister + var bf storage.BloomFilter + var err error + + defer func() { + if err != nil && db != nil { + _ = db.Destroy() + } + }() + + if args.NumOfActivePersisters < 1 { + return nil, storage.ErrInvalidNumberOfPersisters + } + if check.IfNil(args.Notifier) { + return nil, storage.ErrNilEpochStartNotifier + } + if check.IfNil(args.PersisterFactory) { + return nil, storage.ErrNilPersisterFactory + } + if check.IfNil(args.ShardCoordinator) { + return nil, storage.ErrNilShardCoordinator + } + if check.IfNil(args.PathManager) { + return nil, storage.ErrNilPathManager + } + + cache, err = storageUnit.NewCache(args.CacheConf.Type, args.CacheConf.Size, args.CacheConf.Shards) + if err != nil { + return nil, err + } + + filePath := args.PathManager.PathForEpoch(core.GetShardIdString(args.ShardCoordinator.SelfId()), args.StartingEpoch, args.Identifier) + if len(shardIdStr) > 0 { + filePath = filePath + shardIdStr + args.Identifier += shardIdStr + } + db, err = args.PersisterFactory.Create(filePath) + if err != nil { + return nil, err + } + + var persisters []*persisterData + persisters = append(persisters, &persisterData{ + persister: db, + path: filePath, + isClosed: false, + }) + + persistersMapByEpoch := make(map[uint32]*persisterData) + persistersMapByEpoch[args.StartingEpoch] = persisters[0] + + pdb := &PruningStorer{ + pruningEnabled: args.PruningEnabled, + identifier: args.Identifier, + fullArchive: args.FullArchive, + activePersisters: persisters, + persisterFactory: args.PersisterFactory, + shardCoordinator: args.ShardCoordinator, + persistersMapByEpoch: persistersMapByEpoch, + cacher: cache, + bloomFilter: nil, + pathManager: args.PathManager, + dbPath: args.DbPath, + numOfEpochsToKeep: args.NumOfEpochsToKeep, + numOfActivePersisters: args.NumOfActivePersisters, + } + + if args.BloomFilterConf.Size != 0 { // if size is 0, that means an empty config was used so bloom filter will be nil + 
bf, err = storageUnit.NewBloomFilter(args.BloomFilterConf) + if err != nil { + return nil, err + } + + pdb.bloomFilter = bf + } + + err = pdb.activePersisters[0].persister.Init() + if err != nil { + return nil, err + } + + pdb.registerHandler(args.Notifier) + + return pdb, nil +} + +// Put adds data to both cache and persistence medium and updates the bloom filter +func (ps *PruningStorer) Put(key, data []byte) error { + ps.lock.Lock() + defer ps.lock.Unlock() + + ps.cacher.Put(key, data) + + err := ps.activePersisters[0].persister.Put(key, data) + if err != nil { + ps.cacher.Remove(key) + return err + } + + if ps.bloomFilter != nil { + ps.bloomFilter.Add(key) + } + + return nil +} + +// Get searches the key in the cache. In case it is not found, it verifies with the bloom filter +// if the key may be in the db. If bloom filter confirms then it further searches in the databases. +func (ps *PruningStorer) Get(key []byte) ([]byte, error) { + ps.lock.Lock() + defer ps.lock.Unlock() + + v, ok := ps.cacher.Get(key) + var err error + + if !ok { + // not found in cache + // search it in active persisters + found := false + for idx := uint32(0); (idx < ps.numOfActivePersisters) && (idx < uint32(len(ps.activePersisters))); idx++ { + if ps.bloomFilter == nil || ps.bloomFilter.MayContain(key) { + v, err = ps.activePersisters[idx].persister.Get(key) + if err != nil { + continue + } + + found = true + // if found in persistence unit, add it to cache + ps.cacher.Put(key, v) + break + } + } + if !found { + return nil, fmt.Errorf("key %s not found in %s", + base64.StdEncoding.EncodeToString(key), ps.identifier) + } + } + + return v.([]byte), nil +} + +// GetFromEpoch will search a key only in the persister for the given epoch +func (ps *PruningStorer) GetFromEpoch(key []byte, epoch uint32) ([]byte, error) { + // TODO: this will be used when requesting from resolvers + ps.lock.Lock() + defer ps.lock.Unlock() + + v, ok := ps.cacher.Get(key) + if ok { + return v.([]byte), nil + } + + persisterData, exists := ps.persistersMapByEpoch[epoch] + if !exists { + return nil, fmt.Errorf("key %s not found in %s", + base64.StdEncoding.EncodeToString(key), ps.identifier) + } + + if !persisterData.isClosed { + return persisterData.persister.Get(key) + } + + persister, err := ps.persisterFactory.Create(persisterData.path) + if err != nil { + log.Debug("open old persister", "error", err.Error()) + return nil, err + } + + defer func() { + err = persister.Close() + if err != nil { + log.Debug("persister.Close()", "error", err.Error()) + } + }() + + err = persister.Init() + if err != nil { + log.Debug("init old persister", "error", err.Error()) + return nil, err + } + + res, err := persister.Get(key) + if err == nil { + return res, nil + } + + log.Warn("get from closed persister", + "id", ps.identifier, + "epoch", epoch, + "key", key, + "error", err.Error()) + + return nil, fmt.Errorf("key %s not found in %s", + base64.StdEncoding.EncodeToString(key), ps.identifier) + +} + +// Has checks if the key is in the Unit. +// It first checks the cache. 
If it is not found, it checks the bloom filter +// and if present it checks the db +func (ps *PruningStorer) Has(key []byte) error { + ps.lock.RLock() + defer ps.lock.RUnlock() + + has := ps.cacher.Has(key) + if has { + return nil + } + + if ps.bloomFilter == nil || ps.bloomFilter.MayContain(key) { + for _, persister := range ps.activePersisters { + if persister.persister.Has(key) != nil { + continue + } + + return nil + } + } + + return storage.ErrKeyNotFound +} + +// HasInEpoch checks if the key is in the Unit in a given epoch. +// It first checks the cache. If it is not found, it checks the bloom filter +// and if present it checks the db +func (ps *PruningStorer) HasInEpoch(key []byte, epoch uint32) error { + // TODO: this will be used when requesting from resolvers + ps.lock.RLock() + defer ps.lock.RUnlock() + + has := ps.cacher.Has(key) + if has { + return nil + } + + if ps.bloomFilter == nil || ps.bloomFilter.MayContain(key) { + persisterData, ok := ps.persistersMapByEpoch[epoch] + if !ok { + return storage.ErrKeyNotFound + } + + if !persisterData.isClosed { + return persisterData.persister.Has(key) + } + + persister, err := ps.persisterFactory.Create(persisterData.path) + if err != nil { + log.Debug("open old persister", "error", err.Error()) + return err + } + + defer func() { + err = persister.Close() + if err != nil { + log.Debug("persister.Close()", "error", err.Error()) + } + }() + + err = persister.Init() + if err != nil { + log.Debug("init old persister", "error", err.Error()) + return err + } + + return persister.Has(key) + } + + return storage.ErrKeyNotFound +} + +// Remove removes the data associated to the given key from both cache and persistence medium +func (ps *PruningStorer) Remove(key []byte) error { + ps.lock.Lock() + defer ps.lock.Unlock() + + var err error + ps.cacher.Remove(key) + for _, persisterData := range ps.activePersisters { + err = persisterData.persister.Remove(key) + if err == nil { + return nil + } + } + + return err +} + +// ClearCache cleans up the entire cache +func (ps *PruningStorer) ClearCache() { + ps.cacher.Clear() +} + +// DestroyUnit cleans up the bloom filter, the cache, and the dbs +func (ps *PruningStorer) DestroyUnit() error { + ps.lock.Lock() + defer ps.lock.Unlock() + + if ps.bloomFilter != nil { + ps.bloomFilter.Clear() + } + + ps.cacher.Clear() + + var err error + numOfPersistersRemoved := 0 + totalNumOfPersisters := len(ps.persistersMapByEpoch) + for _, persisterData := range ps.persistersMapByEpoch { + if persisterData.isClosed { + err = persisterData.persister.DestroyClosed() + } else { + err = persisterData.persister.Destroy() + } + + if err != nil { + log.Debug("pruning db: destroy", + "error", err.Error()) + continue + } + numOfPersistersRemoved++ + } + + if numOfPersistersRemoved != totalNumOfPersisters { + log.Debug("error destroying pruning db", + "identifier", ps.identifier, + "destroyed", numOfPersistersRemoved, + "total", totalNumOfPersisters) + return storage.ErrDestroyingUnit + } + + return nil +} + +// registerHandler will register a new function to the epoch start notifier +func (ps *PruningStorer) registerHandler(handler EpochStartNotifier) { + subscribeHandler := notifier.MakeHandlerForEpochStart(func(hdr data.HeaderHandler) { + err := ps.changeEpoch(hdr.GetEpoch()) + if err != nil { + log.Warn("change epoch in storer", "error", err.Error()) + } + }) + + handler.RegisterHandler(subscribeHandler) +} + +// changeEpoch will handle creating a new persister and removing of the older ones +func (ps *PruningStorer) 
changeEpoch(epoch uint32) error { + // if pruning is not enabled, don't create new persisters, but use the same one instead + if !ps.pruningEnabled { + return nil + } + + ps.lock.Lock() + defer ps.lock.Unlock() + + shardId := core.GetShardIdString(ps.shardCoordinator.SelfId()) + filePath := ps.pathManager.PathForEpoch(shardId, epoch, ps.identifier) + db, err := ps.persisterFactory.Create(filePath) + if err != nil { + log.Warn("change epoch error", "error - "+ps.identifier, err.Error()) + return err + } + + newPersister := &persisterData{ + persister: db, + path: filePath, + isClosed: false, + } + + singleItemPersisters := []*persisterData{newPersister} + ps.activePersisters = append(singleItemPersisters, ps.activePersisters...) + ps.persistersMapByEpoch[epoch] = newPersister + + err = ps.activePersisters[0].persister.Init() + if err != nil { + return err + } + + err = ps.closeAndDestroyPersisters(epoch) + if err != nil { + log.Debug("closing and destroying old persister", "error", err.Error()) + return err + } + + return nil +} + +func (ps *PruningStorer) closeAndDestroyPersisters(epoch uint32) error { + // recent activePersisters have to he closed for both scenarios: full archive or not + if ps.numOfActivePersisters < uint32(len(ps.activePersisters)) { + persisterToClose := ps.activePersisters[ps.numOfActivePersisters] + err := persisterToClose.persister.Close() + if err != nil { + log.Error("error closing persister", "error", err.Error(), "id", ps.identifier) + return err + } + // remove it from the active persisters slice + ps.activePersisters = ps.activePersisters[:ps.numOfActivePersisters] + persisterToClose.isClosed = true + epochToClose := epoch - ps.numOfActivePersisters + ps.persistersMapByEpoch[epochToClose] = persisterToClose + } + + if !ps.fullArchive && uint32(len(ps.persistersMapByEpoch)) > ps.numOfEpochsToKeep { + epochToRemove := epoch - ps.numOfEpochsToKeep + persisterToDestroy, ok := ps.persistersMapByEpoch[epochToRemove] + if !ok { + return errors.New("persister to destroy not found") + } + delete(ps.persistersMapByEpoch, epochToRemove) + + err := persisterToDestroy.persister.DestroyClosed() + if err != nil { + return err + } + removeDirectoryIfEmpty(persisterToDestroy.path) + } + + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ps *PruningStorer) IsInterfaceNil() bool { + return ps == nil +} diff --git a/storage/pruning/pruningStorerArgs.go b/storage/pruning/pruningStorerArgs.go new file mode 100644 index 00000000000..4ec661f5310 --- /dev/null +++ b/storage/pruning/pruningStorerArgs.go @@ -0,0 +1,24 @@ +package pruning + +import ( + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" +) + +// StorerArgs will hold the arguments needed for PruningStorer +type StorerArgs struct { + Identifier string + PruningEnabled bool + ShardCoordinator sharding.Coordinator + StartingEpoch uint32 + FullArchive bool + CacheConf storageUnit.CacheConfig + PathManager storage.PathManagerHandler + DbPath string + PersisterFactory DbFactoryHandler + BloomFilterConf storageUnit.BloomConfig + NumOfEpochsToKeep uint32 + NumOfActivePersisters uint32 + Notifier EpochStartNotifier +} diff --git a/storage/pruning/pruningStorer_test.go b/storage/pruning/pruningStorer_test.go new file mode 100644 index 00000000000..f01cd4a22f1 --- /dev/null +++ b/storage/pruning/pruningStorer_test.go @@ -0,0 +1,426 @@ +package pruning_test + +import ( + "encoding/json" + 
"fmt" + "os" + "regexp" + "strings" + "testing" + + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/storage/memorydb" + "github.com/ElrondNetwork/elrond-go/storage/mock" + "github.com/ElrondNetwork/elrond-go/storage/pruning" + "github.com/ElrondNetwork/elrond-go/storage/storageUnit" + "github.com/stretchr/testify/assert" +) + +func getDummyConfig() (storageUnit.CacheConfig, storageUnit.DBConfig, storageUnit.BloomConfig) { + cacheConf := storageUnit.CacheConfig{ + Size: 10, + Type: "LRU", + Shards: 3, + } + dbConf := storageUnit.DBConfig{ + FilePath: "path/Epoch_0/Shard_1", + Type: "LvlDBSerial", + BatchDelaySeconds: 500, + MaxBatchSize: 1, + MaxOpenFiles: 1000, + } + blConf := storageUnit.BloomConfig{} + return cacheConf, dbConf, blConf +} + +func getDefaultArgs() *pruning.StorerArgs { + cacheConf, dbConf, blConf := getDummyConfig() + persisterFactory := &mock.PersisterFactoryStub{ + CreateCalled: func(path string) (storage.Persister, error) { + return memorydb.New(), nil + }, + } + return &pruning.StorerArgs{ + PruningEnabled: true, + Identifier: "id", + FullArchive: false, + ShardCoordinator: mock.NewShardCoordinatorMock(0, 2), + PathManager: &mock.PathManagerStub{}, + CacheConf: cacheConf, + DbPath: dbConf.FilePath, + PersisterFactory: persisterFactory, + BloomFilterConf: blConf, + NumOfEpochsToKeep: 2, + NumOfActivePersisters: 2, + Notifier: &mock.EpochStartNotifierStub{}, + } +} + +func TestNewPruningStorer_InvalidNumberOfActivePersistersShouldErr(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + args.NumOfActivePersisters = 0 + + ps, err := pruning.NewPruningStorer(args) + + assert.Nil(t, ps) + assert.Equal(t, storage.ErrInvalidNumberOfPersisters, err) +} + +func TestNewPruningStorer_NilEpochStartHandlerShouldErr(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + args.Notifier = nil + ps, err := pruning.NewPruningStorer(args) + + assert.Nil(t, ps) + assert.Equal(t, storage.ErrNilEpochStartNotifier, err) +} + +func TestNewPruningStorer_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + args.ShardCoordinator = nil + ps, err := pruning.NewPruningStorer(args) + + assert.Nil(t, ps) + assert.Equal(t, storage.ErrNilShardCoordinator, err) +} + +func TestNewPruningStorer_NilPathManagerShouldErr(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + args.PathManager = nil + ps, err := pruning.NewPruningStorer(args) + + assert.Nil(t, ps) + assert.Equal(t, storage.ErrNilPathManager, err) +} + +func TestNewPruningStorer_NilPersisterFactoryShouldErr(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + args.PersisterFactory = nil + ps, err := pruning.NewPruningStorer(args) + + assert.Nil(t, ps) + assert.Equal(t, storage.ErrNilPersisterFactory, err) +} + +func TestNewPruningStorer_OkValsShouldWork(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + ps, err := pruning.NewPruningStorer(args) + + assert.NotNil(t, ps) + assert.Nil(t, err) + assert.False(t, ps.IsInterfaceNil()) +} + +func TestNewShardedPruningStorer_OkValsShouldWork(t *testing.T) { + t.Parallel() + + shardId := uint32(7) + shardIdStr := fmt.Sprintf("%d", shardId) + args := getDefaultArgs() + args.PersisterFactory = &mock.PersisterFactoryStub{ + CreateCalled: func(path string) (storage.Persister, error) { + if !strings.Contains(path, shardIdStr) { + assert.Fail(t, "path not set correctly") + } + + return memorydb.New(), nil + }, + } + ps, err := pruning.NewShardedPruningStorer(args, shardId) + + assert.NotNil(t, ps) 
+ assert.Nil(t, err) + assert.False(t, ps.IsInterfaceNil()) +} + +func TestPruningStorer_PutAndGetShouldWork(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + ps, _ := pruning.NewPruningStorer(args) + + testKey, testVal := []byte("key"), []byte("value") + err := ps.Put(testKey, testVal) + assert.Nil(t, err) + + res, err := ps.Get(testKey) + assert.Nil(t, err) + assert.Equal(t, testVal, res) +} + +func TestPruningStorer_RemoveShouldWork(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + ps, _ := pruning.NewPruningStorer(args) + + testKey, testVal := []byte("key"), []byte("value") + err := ps.Put(testKey, testVal) + assert.Nil(t, err) + + // make sure that the key is there + res, err := ps.Get(testKey) + assert.Nil(t, err) + assert.Equal(t, testVal, res) + + // now remove it + err = ps.Remove(testKey) + assert.Nil(t, err) + + // it should have been removed from the persister and cache + res, err = ps.Get(testKey) + assert.NotNil(t, err) + assert.Nil(t, res) +} + +func TestPruningStorer_DestroyUnitShouldWork(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + args.NumOfEpochsToKeep = 3 + ps, _ := pruning.NewPruningStorer(args) + + // simulate the passing of 2 epochs in order to have more persisters. + // we will store 3 epochs with 2 active. all 3 should be removed + _ = ps.ChangeEpoch(1) + _ = ps.ChangeEpoch(2) + + err := ps.DestroyUnit() + assert.Nil(t, err) +} + +func TestNewPruningStorer_Has_OnePersisterShouldWork(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + ps, _ := pruning.NewPruningStorer(args) + + testKey, testVal := []byte("key"), []byte("value") + err := ps.Put(testKey, testVal) + assert.Nil(t, err) + + err = ps.Has(testKey) + assert.Nil(t, err) + + wrongKey := []byte("wrong_key") + err = ps.Has(wrongKey) + assert.NotNil(t, err) +} + +func TestNewPruningStorer_Has_MultiplePersistersShouldWork(t *testing.T) { + t.Parallel() + + persistersByPath := make(map[string]storage.Persister) + persistersByPath["Epoch_0"] = memorydb.New() + args := getDefaultArgs() + args.DbPath = "Epoch_0" + args.PersisterFactory = &mock.PersisterFactoryStub{ + // simulate an opening of an existing database from the file path by saving activePersisters in a map based on their path + CreateCalled: func(path string) (storage.Persister, error) { + if _, ok := persistersByPath[path]; ok { + return persistersByPath[path], nil + } + newPers := memorydb.New() + persistersByPath[path] = newPers + + return newPers, nil + }, + } + args.NumOfActivePersisters = 1 + args.NumOfEpochsToKeep = 2 + ps, _ := pruning.NewPruningStorer(args) + + testKey, testVal := []byte("key"), []byte("value") + err := ps.Put(testKey, testVal) + assert.Nil(t, err) + + ps.ClearCache() + err = ps.Has(testKey) + assert.Nil(t, err) + + _ = ps.ChangeEpoch(1) + ps.ClearCache() + + // data should still be available in the closed persister + err = ps.HasInEpoch(testKey, 0) + assert.Nil(t, err) + + // data should not be available when calling in another epoch + err = ps.HasInEpoch(testKey, 1) + assert.NotNil(t, err) + + // after one more epoch change, the persister which holds the data should be removed and the key should not be available + _ = ps.ChangeEpoch(2) + ps.ClearCache() + + err = ps.HasInEpoch(testKey, 0) + assert.NotNil(t, err) +} + +func TestNewPruningStorer_OldDataHasToBeRemoved(t *testing.T) { + t.Parallel() + + args := getDefaultArgs() + ps, _ := pruning.NewPruningStorer(args) + + // add a key and then make 2 epoch changes so the data won't be available anymore + testKey, _ := 
json.Marshal([]byte("key")) + testVal := []byte("value") + err := ps.Put(testKey, testVal) + assert.Nil(t, err) + + ps.ClearCache() + + // first check that data is available + res, err := ps.Get(testKey) + assert.Nil(t, err) + assert.Equal(t, testVal, res) + + // now change the epoch once + err = ps.ChangeEpoch(1) + assert.Nil(t, err) + + ps.ClearCache() + + // check if data is still available + res, err = ps.Get(testKey) + assert.Nil(t, err) + assert.Equal(t, testVal, res) + + // now change the epoch again + err = ps.ChangeEpoch(2) + assert.Nil(t, err) + + ps.ClearCache() + + // data shouldn't be available anymore + res, err = ps.Get(testKey) + assert.Nil(t, res) + assert.NotNil(t, err) + assert.True(t, strings.Contains(err.Error(), "not found")) +} + +func TestNewPruningStorer_GetDataFromClosedPersister(t *testing.T) { + t.Parallel() + + persistersByPath := make(map[string]storage.Persister) + persistersByPath["Epoch_0"] = memorydb.New() + args := getDefaultArgs() + args.DbPath = "Epoch_0" + args.PersisterFactory = &mock.PersisterFactoryStub{ + // simulate an opening of an existing database from the file path by saving activePersisters in a map based on their path + CreateCalled: func(path string) (storage.Persister, error) { + if _, ok := persistersByPath[path]; ok { + return persistersByPath[path], nil + } + newPers := memorydb.New() + persistersByPath[path] = newPers + + return newPers, nil + }, + } + args.NumOfActivePersisters = 1 + ps, _ := pruning.NewPruningStorer(args) + + // add a key and then make 2 epoch changes so the data won't be available anymore + testKey, _ := json.Marshal([]byte("key")) + testVal := []byte("value") + err := ps.Put(testKey, testVal) + assert.Nil(t, err) + + ps.ClearCache() + + // first check that data is available + res, err := ps.Get(testKey) + assert.Nil(t, err) + assert.Equal(t, testVal, res) + + // now change the epoch so the first persister will be closed as only one persister is active at a moment. 
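+	// GetFromEpoch is then expected to re-open the closed persister from its recorded path through
+	// the persister factory, read the value and close it again before returning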
+ err = ps.ChangeEpoch(1) + assert.Nil(t, err) + + ps.ClearCache() + + // check if data is still available after searching in closed activePersisters + res, err = ps.GetFromEpoch(testKey, 0) + assert.Nil(t, err) + assert.Equal(t, testVal, res) +} + +func TestNewPruningStorer_ChangeEpochDbsShouldNotBeDeletedIfPruningIsDisabled(t *testing.T) { + t.Parallel() + + persistersByPath := make(map[string]storage.Persister) + args := getDefaultArgs() + args.DbPath = "Epoch_0" + args.PruningEnabled = false + args.PersisterFactory = &mock.PersisterFactoryStub{ + // simulate an opening of an existing database from the file path by saving activePersisters in a map based on their path + CreateCalled: func(path string) (storage.Persister, error) { + if _, ok := persistersByPath[path]; ok { + return persistersByPath[path], nil + } + newPers := memorydb.New() + persistersByPath[path] = newPers + + return newPers, nil + }, + } + args.NumOfActivePersisters = 1 + ps, _ := pruning.NewPruningStorer(args) + + // change the epoch multiple times + _ = ps.ChangeEpoch(1) + _ = ps.ChangeEpoch(2) + _ = ps.ChangeEpoch(3) + + assert.Equal(t, 1, len(persistersByPath)) +} + +func TestRegex(t *testing.T) { + t.Parallel() + + expectedRes := "db/Epoch_7/Shard_2" + replacementEpoch := "Epoch_7" + + var testPaths []string + testPaths = append(testPaths, "db/Epoch_22282493984354/Shard_2") + testPaths = append(testPaths, "db/Epoch_0/Shard_2") + testPaths = append(testPaths, "db/Epoch_02/Shard_2") + testPaths = append(testPaths, "db/Epoch_99999999999999999999999999999999999999999999/Shard_2") + + rg := regexp.MustCompile(`Epoch_\d+`) + + for _, path := range testPaths { + assert.Equal(t, expectedRes, rg.ReplaceAllString(path, replacementEpoch)) + } +} + +func TestDirectories(t *testing.T) { + pathToCreate := "user-directory/go/src/workspace/db/Epoch_2/Shard_27" + pathParameter := pathToCreate + "/MiniBlock" + // should become user-directory/go/src/workspace/db + + err := os.MkdirAll(pathToCreate, os.ModePerm) + assert.Nil(t, err) + + pruning.RemoveDirectoryIfEmpty(pathParameter) + + if _, err := os.Stat(pathParameter); !os.IsNotExist(err) { + assert.Fail(t, "directory should have been removed") + } + + _ = os.RemoveAll("user-directory") +} diff --git a/storage/storageUnit/storageunit_test.go b/storage/storageUnit/storageunit_test.go index f7c03aaa31c..c582536f6a9 100644 --- a/storage/storageUnit/storageunit_test.go +++ b/storage/storageUnit/storageunit_test.go @@ -31,11 +31,10 @@ func logError(err error) { } func initStorageUnitWithBloomFilter(t *testing.T, cSize int) *storageUnit.Unit { - mdb, err1 := memorydb.New() + mdb := memorydb.New() cache, err2 := lrucache.NewCache(cSize) bf := bloom.NewDefaultFilter() - assert.Nil(t, err1, "failed creating db: %s", err1) assert.Nil(t, err2, "no error expected but got %s", err2) sUnit, err := storageUnit.NewStorageUnitWithBloomFilter(cache, mdb, bf) @@ -46,10 +45,9 @@ func initStorageUnitWithBloomFilter(t *testing.T, cSize int) *storageUnit.Unit { } func initStorageUnitWithNilBloomFilter(t *testing.T, cSize int) *storageUnit.Unit { - mdb, err1 := memorydb.New() + mdb := memorydb.New() cache, err2 := lrucache.NewCache(cSize) - assert.Nil(t, err1, "failed creating db: %s", err1) assert.Nil(t, err2, "no error expected but got %s", err2) sUnit, err := storageUnit.NewStorageUnit(cache, mdb) @@ -71,34 +69,28 @@ func TestStorageUnitNilPersister(t *testing.T) { } func TestStorageUnitNilCacher(t *testing.T) { - mdb, err1 := memorydb.New() + mdb := memorydb.New() bf := bloom.NewDefaultFilter() - 
assert.Nil(t, err1, "failed creating db") - - _, err1 = storageUnit.NewStorageUnitWithBloomFilter(nil, mdb, bf) - + _, err1 := storageUnit.NewStorageUnitWithBloomFilter(nil, mdb, bf) assert.NotNil(t, err1, "expected failure") } func TestStorageUnitNilBloomFilter(t *testing.T) { cache, err1 := lrucache.NewCache(10) - mdb, err2 := memorydb.New() + mdb := memorydb.New() assert.Nil(t, err1, "no error expected but got %s", err1) - assert.Nil(t, err2, "failed creating db") _, err := storageUnit.NewStorageUnit(cache, mdb) - assert.Nil(t, err, "did not expect failure") } func TestStorageUnit_NilBloomFilterShouldErr(t *testing.T) { cache, err1 := lrucache.NewCache(10) - mdb, err2 := memorydb.New() + mdb := memorydb.New() assert.Nil(t, err1, "no error expected but got %s", err1) - assert.Nil(t, err2, "failed creating db") sUnit, err := storageUnit.NewStorageUnitWithBloomFilter(cache, mdb, nil) diff --git a/storage/txcache/concurrentMap.go b/storage/txcache/concurrentMap.go index a07bcc825ae..ce0de902d15 100644 --- a/storage/txcache/concurrentMap.go +++ b/storage/txcache/concurrentMap.go @@ -11,6 +11,7 @@ import ( // ConcurrentMap is a thread safe map of type string:Anything. // To avoid lock bottlenecks this map is divided to several map chunks. type ConcurrentMap struct { + mutex sync.Mutex nChunks uint32 chunks []*concurrentMapChunk } @@ -23,6 +24,11 @@ type concurrentMapChunk struct { // NewConcurrentMap creates a new concurrent map. func NewConcurrentMap(nChunks uint32) *ConcurrentMap { + // We cannot have a map with no chunks + if nChunks == 0 { + nChunks = 1 + } + m := ConcurrentMap{ nChunks: nChunks, chunks: make([]*concurrentMapChunk, nChunks), @@ -50,6 +56,19 @@ func (m *ConcurrentMap) Set(key string, value interface{}) { chunk.Unlock() } +// SetIfAbsent sets the given value under the specified key if no value was associated with it. +func (m *ConcurrentMap) SetIfAbsent(key string, value interface{}) bool { + // Get map shard. + chunk := m.getChunk(key) + chunk.Lock() + _, ok := chunk.items[key] + if !ok { + chunk.items[key] = value + } + chunk.Unlock() + return !ok +} + // Get retrieves an element from map under given key. func (m *ConcurrentMap) Get(key string) (interface{}, bool) { chunk := m.getChunk(key) @@ -88,11 +107,6 @@ func (m *ConcurrentMap) Remove(key string) { chunk.Unlock() } -// IsEmpty checks if map is empty. -func (m *ConcurrentMap) IsEmpty() bool { - return m.Count() == 0 -} - // IterCb is an iterator callback type IterCb func(key string, v interface{}) @@ -118,3 +132,45 @@ func fnv32(key string) uint32 { } return hash } + +// Clear clears the map +func (m *ConcurrentMap) Clear() { + // There is no need to explicitly remove each item for each shard + // The garbage collector will remove the data from memory + + // Assignment is not an atomic operation, so we have to wrap this in a critical section + m.mutex.Lock() + m.chunks = make([]*concurrentMapChunk, m.nChunks) + m.mutex.Unlock() +} + +// Keys returns all keys as []string +func (m *ConcurrentMap) Keys() []string { + count := m.Count() + ch := make(chan string, count) + go func() { + // Foreach shard. + wg := sync.WaitGroup{} + wg.Add(int(m.nChunks)) + for _, shard := range m.chunks { + go func(shard *concurrentMapChunk) { + // Foreach key, value pair. 
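+				// each chunk is locked for reading only while its keys are copied into the buffered
+				// channel, so concurrent writers to other chunks are not blocked in the meantime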
+ shard.RLock() + for key := range shard.items { + ch <- key + } + shard.RUnlock() + wg.Done() + }(shard) + } + wg.Wait() + close(ch) + }() + + // Generate keys + keys := make([]string, 0, count) + for k := range ch { + keys = append(keys, k) + } + return keys +} diff --git a/storage/txcache/concurrentMap_test.go b/storage/txcache/concurrentMap_test.go new file mode 100644 index 00000000000..7076f07a885 --- /dev/null +++ b/storage/txcache/concurrentMap_test.go @@ -0,0 +1,29 @@ +package txcache + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// TODO: Add more unit tests after moving this to core package (later in time) +func Test_NewConcurrentMap(t *testing.T) { + myMap := NewConcurrentMap(4) + require.Equal(t, uint32(4), myMap.nChunks) + require.Equal(t, 4, len(myMap.chunks)) + + // 1 is minimum number of chunks + myMap = NewConcurrentMap(0) + require.Equal(t, uint32(1), myMap.nChunks) + require.Equal(t, 1, len(myMap.chunks)) +} + +func Test_ConcurrentMapKeys(t *testing.T) { + myMap := NewConcurrentMap(4) + myMap.Set("1", 0) + myMap.Set("2", 0) + myMap.Set("3", 0) + myMap.Set("4", 0) + + require.Equal(t, 4, len(myMap.Keys())) +} diff --git a/storage/txcache/eviction.go b/storage/txcache/eviction.go index 36faa1a057a..c1215a36766 100644 --- a/storage/txcache/eviction.go +++ b/storage/txcache/eviction.go @@ -16,29 +16,45 @@ type EvictionConfig struct { // doEviction does cache eviction // We do not allow more evictions to start concurrently -func (cache *TxCache) doEviction() { +func (cache *TxCache) doEviction() evictionJournal { if !cache.areThereTooManyTxs() { - return + return evictionJournal{} } cache.evictionMutex.Lock() + defer cache.evictionMutex.Unlock() + + log.Info("TxCache.doEviction()") + + journal := evictionJournal{} if cache.areThereTooManySenders() { countTxs, countSenders := cache.evictOldestSenders() - log.Trace("DoEviction, 1st pass:", "countTxs", countTxs, "countSenders", countSenders) + journal.passOneNumTxs = countTxs + journal.passOneNumSenders = countSenders + journal.evictionPerformed = true } if cache.areThereTooManyTxs() { countTxs, countSenders := cache.evictHighNonceTransactions() - log.Trace("DoEviction, 2nd pass:", "countTxs", countTxs, "countSenders", countSenders) + journal.passTwoNumTxs = countTxs + journal.passTwoNumSenders = countSenders + journal.evictionPerformed = true } if cache.areThereTooManyTxs() && !cache.areThereJustAFewSenders() { steps, countTxs, countSenders := cache.evictSendersWhileTooManyTxs() - log.Trace("DoEviction, 3rd pass:", "steps", steps, "countTxs", countTxs, "countSenders", countSenders) + journal.passThreeNumTxs = countTxs + journal.passThreeNumSenders = countSenders + journal.passThreeNumSteps = steps + journal.evictionPerformed = true + } + + if journal.evictionPerformed { + journal.display() } - cache.evictionMutex.Unlock() + return journal } func (cache *TxCache) areThereTooManySenders() bool { diff --git a/storage/txcache/evictionJournal.go b/storage/txcache/evictionJournal.go new file mode 100644 index 00000000000..c0fd1689b6d --- /dev/null +++ b/storage/txcache/evictionJournal.go @@ -0,0 +1,21 @@ +package txcache + +// evictionJournal keeps a short journal about the eviction process +// This is useful for debugging and reasoning about the eviction +type evictionJournal struct { + evictionPerformed bool + passOneNumTxs uint32 + passOneNumSenders uint32 + passTwoNumTxs uint32 + passTwoNumSenders uint32 + passThreeNumTxs uint32 + passThreeNumSenders uint32 + passThreeNumSteps uint32 +} + +func (journal 
*evictionJournal) display() { + log.Info("Eviction journal:") + log.Info("1st pass:", "txs", journal.passOneNumTxs, "senders", journal.passOneNumSenders) + log.Info("2nd pass:", "txs", journal.passTwoNumTxs, "senders", journal.passTwoNumSenders) + log.Info("3rd pass:", "steps", journal.passThreeNumSteps, "txs", journal.passThreeNumTxs, "senders", journal.passThreeNumSenders) +} diff --git a/storage/txcache/eviction_test.go b/storage/txcache/eviction_test.go index 3263b6dec62..daef019a8d7 100644 --- a/storage/txcache/eviction_test.go +++ b/storage/txcache/eviction_test.go @@ -2,7 +2,7 @@ package txcache import "testing" -import "github.com/stretchr/testify/assert" +import "github.com/stretchr/testify/require" func Test_EvictOldestSenders(t *testing.T) { config := EvictionConfig{ @@ -18,13 +18,13 @@ func Test_EvictOldestSenders(t *testing.T) { nTxs, nSenders := cache.evictOldestSenders() - assert.Equal(t, uint32(2), nTxs) - assert.Equal(t, uint32(2), nSenders) - assert.Equal(t, int64(1), cache.txListBySender.counter.Get()) - assert.Equal(t, int64(1), cache.txByHash.counter.Get()) + require.Equal(t, uint32(2), nTxs) + require.Equal(t, uint32(2), nSenders) + require.Equal(t, int64(1), cache.txListBySender.counter.Get()) + require.Equal(t, int64(1), cache.txByHash.counter.Get()) } -func Test_DoHighNonceTransactionsEviction(t *testing.T) { +func Test_EvictHighNonceTransactions(t *testing.T) { config := EvictionConfig{ CountThreshold: 400, ALotOfTransactionsForASender: 50, @@ -43,15 +43,33 @@ func Test_DoHighNonceTransactionsEviction(t *testing.T) { cache.AddTx([]byte("hash-carol"), createTx("carol", uint64(1))) - assert.Equal(t, int64(3), cache.txListBySender.counter.Get()) - assert.Equal(t, int64(401), cache.txByHash.counter.Get()) + require.Equal(t, int64(3), cache.txListBySender.counter.Get()) + require.Equal(t, int64(401), cache.txByHash.counter.Get()) nTxs, nSenders := cache.evictHighNonceTransactions() - assert.Equal(t, uint32(50), nTxs) - assert.Equal(t, uint32(0), nSenders) - assert.Equal(t, int64(3), cache.txListBySender.counter.Get()) - assert.Equal(t, int64(351), cache.txByHash.counter.Get()) + require.Equal(t, uint32(50), nTxs) + require.Equal(t, uint32(0), nSenders) + require.Equal(t, int64(3), cache.txListBySender.counter.Get()) + require.Equal(t, int64(351), cache.txByHash.counter.Get()) +} + +func Test_EvictHighNonceTransactions_CoverEmptiedSenderList(t *testing.T) { + config := EvictionConfig{ + CountThreshold: 0, + ALotOfTransactionsForASender: 0, + NumTxsToEvictForASenderWithALot: 1, + } + + cache := NewTxCacheWithEviction(1, config) + cache.AddTx([]byte("hash-alice"), createTx("alice", uint64(1))) + require.Equal(t, int64(1), cache.CountSenders()) + + // Alice is also removed from the map of senders, since it has no transaction left + nTxs, nSenders := cache.evictHighNonceTransactions() + require.Equal(t, uint32(1), nTxs) + require.Equal(t, uint32(1), nSenders) + require.Equal(t, int64(0), cache.CountSenders()) } func Test_EvictSendersWhileTooManyTxs(t *testing.T) { @@ -68,14 +86,56 @@ func Test_EvictSendersWhileTooManyTxs(t *testing.T) { cache.AddTx([]byte{byte(index)}, createTx(sender, uint64(1))) } - assert.Equal(t, int64(200), cache.txListBySender.counter.Get()) - assert.Equal(t, int64(200), cache.txByHash.counter.Get()) + require.Equal(t, int64(200), cache.txListBySender.counter.Get()) + require.Equal(t, int64(200), cache.txByHash.counter.Get()) steps, nTxs, nSenders := cache.evictSendersWhileTooManyTxs() - assert.Equal(t, uint32(6), steps) - assert.Equal(t, uint32(100), 
nTxs) - assert.Equal(t, uint32(100), nSenders) - assert.Equal(t, int64(100), cache.txListBySender.counter.Get()) - assert.Equal(t, int64(100), cache.txByHash.counter.Get()) + require.Equal(t, uint32(6), steps) + require.Equal(t, uint32(100), nTxs) + require.Equal(t, uint32(100), nSenders) + require.Equal(t, int64(100), cache.txListBySender.counter.Get()) + require.Equal(t, int64(100), cache.txByHash.counter.Get()) +} + +func Test_EvictSendersWhileTooManyTxs_CoverLoopBreak_WhenSmallBatch(t *testing.T) { + config := EvictionConfig{ + CountThreshold: 0, + NumOldestSendersToEvict: 42, + } + + cache := NewTxCacheWithEviction(1, config) + cache.AddTx([]byte("hash-alice"), createTx("alice", uint64(1))) + + // Eviction done in 1 step, since "NumOldestSendersToEvict" > number of senders + steps, nTxs, nSenders := cache.evictSendersWhileTooManyTxs() + require.Equal(t, uint32(1), steps) + require.Equal(t, uint32(1), nTxs) + require.Equal(t, uint32(1), nSenders) +} + +func Test_DoEviction_DoneInPass1_WhenTooManySenders(t *testing.T) { + config := EvictionConfig{ + CountThreshold: 2, + NumOldestSendersToEvict: 2, + } + + cache := NewTxCacheWithEviction(16, config) + cache.AddTx([]byte("hash-alice"), createTx("alice", uint64(1))) + cache.AddTx([]byte("hash-bob"), createTx("bob", uint64(1))) + cache.AddTx([]byte("hash-carol"), createTx("carol", uint64(1))) + + journal := cache.doEviction() + require.Equal(t, uint32(2), journal.passOneNumTxs) + require.Equal(t, uint32(2), journal.passOneNumSenders) + require.Equal(t, uint32(0), journal.passTwoNumTxs) + require.Equal(t, uint32(0), journal.passTwoNumSenders) + require.Equal(t, uint32(0), journal.passThreeNumTxs) + require.Equal(t, uint32(0), journal.passThreeNumSenders) + + // Alice and Bob evicted. Carol still there. 
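+	// with CountThreshold = 2 and NumOldestSendersToEvict = 2, the first pass alone brings the cache
+	// back under the threshold, which is why the journal shows no activity for passes two and three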
+ _, ok := cache.GetByTxHash([]byte("hash-carol")) + require.True(t, ok) + require.Equal(t, int64(1), cache.CountSenders()) + require.Equal(t, int64(1), cache.CountTx()) } diff --git a/storage/txcache/txByHashMap.go b/storage/txcache/txByHashMap.go index c2be55e086c..3ec12474823 100644 --- a/storage/txcache/txByHashMap.go +++ b/storage/txcache/txByHashMap.go @@ -22,9 +22,13 @@ func newTxByHashMap(nChunksHint uint32) txByHashMap { } // addTx adds a transaction to the map -func (txMap *txByHashMap) addTx(txHash []byte, tx data.TransactionHandler) { - txMap.backingMap.Set(string(txHash), tx) - txMap.counter.Increment() +func (txMap *txByHashMap) addTx(txHash []byte, tx data.TransactionHandler) bool { + added := txMap.backingMap.SetIfAbsent(string(txHash), tx) + if added { + txMap.counter.Increment() + } + + return added } // removeTx removes a transaction from the map @@ -63,3 +67,29 @@ func (txMap *txByHashMap) RemoveTxsBulk(txHashes [][]byte) uint32 { txMap.counter.Set(int64(newCount)) return nRemoved } + +// ForEachTransaction is an iterator callback +type ForEachTransaction func(txHash []byte, value data.TransactionHandler) + +// forEach iterates over the senders +func (txMap *txByHashMap) forEach(function ForEachTransaction) { + txMap.backingMap.IterCb(func(key string, item interface{}) { + tx := item.(data.TransactionHandler) + function([]byte(key), tx) + }) +} + +func (txMap *txByHashMap) clear() { + txMap.backingMap.Clear() + txMap.counter.Set(0) +} + +func (txMap *txByHashMap) keys() [][]byte { + keys := txMap.backingMap.Keys() + keysAsBytes := make([][]byte, len(keys)) + for i := 0; i < len(keys); i++ { + keysAsBytes[i] = []byte(keys[i]) + } + + return keysAsBytes +} diff --git a/storage/txcache/txCache.go b/storage/txcache/txCache.go index 54042f3a710..7979c237f53 100644 --- a/storage/txcache/txCache.go +++ b/storage/txcache/txCache.go @@ -41,7 +41,10 @@ func NewTxCacheWithEviction(nChunksHint uint32, evictionConfig EvictionConfig) * // AddTx adds a transaction in the cache // Eviction happens if maximum capacity is reached -func (cache *TxCache) AddTx(txHash []byte, tx data.TransactionHandler) { +func (cache *TxCache) AddTx(txHash []byte, tx data.TransactionHandler) (ok bool, added bool) { + ok = false + added = false + if check.IfNil(tx) { return } @@ -50,8 +53,13 @@ func (cache *TxCache) AddTx(txHash []byte, tx data.TransactionHandler) { cache.doEviction() } - cache.txByHash.addTx(txHash, tx) - cache.txListBySender.addTx(txHash, tx) + ok = true + added = cache.txByHash.addTx(txHash, tx) + if added { + cache.txListBySender.addTx(txHash, tx) + } + + return } // GetByTxHash gets the transaction by hash @@ -63,8 +71,9 @@ func (cache *TxCache) GetByTxHash(txHash []byte) (data.TransactionHandler, bool) // GetTransactions gets a reasonably fair list of transactions to be included in the next miniblock // It returns at most "numRequested" transactions // Each sender gets the chance to give at least "batchSizePerSender" transactions, unless "numRequested" limit is reached before iterating over all senders -func (cache *TxCache) GetTransactions(numRequested int, batchSizePerSender int) []data.TransactionHandler { +func (cache *TxCache) GetTransactions(numRequested int, batchSizePerSender int) ([]data.TransactionHandler, [][]byte) { result := make([]data.TransactionHandler, numRequested) + resultHashes := make([][]byte, numRequested) resultFillIndex := 0 resultIsFull := false @@ -74,7 +83,7 @@ func (cache *TxCache) GetTransactions(numRequested int, batchSizePerSender int) 
cache.forEachSender(func(key string, txList *txListForSender) { // Reset happens on first pass only shouldResetCopy := pass == 0 - copied := txList.copyBatchTo(shouldResetCopy, result[resultFillIndex:], batchSizePerSender) + copied := txList.copyBatchTo(shouldResetCopy, result[resultFillIndex:], resultHashes[resultFillIndex:], batchSizePerSender) resultFillIndex += copied copiedInThisPass += copied @@ -89,10 +98,10 @@ func (cache *TxCache) GetTransactions(numRequested int, batchSizePerSender int) } } - return result[:resultFillIndex] + return result[:resultFillIndex], resultHashes } -// RemoveTxByHash removes +// RemoveTxByHash removes tx by hash func (cache *TxCache) RemoveTxByHash(txHash []byte) error { tx, ok := cache.txByHash.removeTx(string(txHash)) if !ok { @@ -102,7 +111,7 @@ func (cache *TxCache) RemoveTxByHash(txHash []byte) error { found := cache.txListBySender.removeTx(tx) if !found { // This should never happen (eviction should never cause this kind of inconsistency between the two internal maps) - log.Error("RemoveTxByHash detected maps sync inconsistency", "tx", txHash) + log.Error("TxCache.RemoveTxByHash() detected maps sync inconsistency", "tx", txHash) return ErrMapsSyncInconsistency } @@ -114,12 +123,89 @@ func (cache *TxCache) CountTx() int64 { return cache.txByHash.counter.Get() } +// Len is an alias for CountTx +func (cache *TxCache) Len() int { + return int(cache.CountTx()) +} + // CountSenders gets the number of senders in the cache func (cache *TxCache) CountSenders() int64 { return cache.txListBySender.counter.Get() } -// forEachSender iterates over the senders +// forEachSender iterates over the senders in the cache func (cache *TxCache) forEachSender(function ForEachSender) { cache.txListBySender.forEach(function) } + +// ForEachTransaction iterates over the transactions in the cache +func (cache *TxCache) ForEachTransaction(function ForEachTransaction) { + cache.txByHash.forEach(function) +} + +// Clear clears the cache +func (cache *TxCache) Clear() { + cache.txListBySender.clear() + cache.txByHash.clear() +} + +// Put is not implemented +func (cache *TxCache) Put(key []byte, value interface{}) (evicted bool) { + log.Error("TxCache.Put is not implemented") + return false +} + +// Get gets a transaction by hash +func (cache *TxCache) Get(key []byte) (value interface{}, ok bool) { + tx, ok := cache.GetByTxHash(key) + return tx, ok +} + +// Has is not implemented +func (cache *TxCache) Has(key []byte) bool { + log.Error("TxCache.Has is not implemented") + return false +} + +// Peek gets a transaction by hash +func (cache *TxCache) Peek(key []byte) (value interface{}, ok bool) { + tx, ok := cache.GetByTxHash(key) + return tx, ok +} + +// HasOrAdd is not implemented +func (cache *TxCache) HasOrAdd(key []byte, value interface{}) (ok, evicted bool) { + log.Error("TxCache.HasOrAdd is not implemented") + return false, false +} + +// Remove removes tx by hash +func (cache *TxCache) Remove(key []byte) { + _ = cache.RemoveTxByHash(key) +} + +// RemoveOldest is not implemented +func (cache *TxCache) RemoveOldest() { + log.Error("TxCache.RemoveOldest is not implemented") +} + +// Keys returns the tx hashes in the cache +func (cache *TxCache) Keys() [][]byte { + return cache.txByHash.keys() +} + +// MaxSize is not implemented +func (cache *TxCache) MaxSize() int { + log.Error("TxCache.MaxSize is not implemented") + return 0 +} + +// RegisterHandler is not implemented +func (cache *TxCache) RegisterHandler(func(key []byte)) { + log.Error("TxCache.RegisterHandler is not 
implemented") +} + +// IsInterfaceNil returns true if there is no value under the interface +func (cache *TxCache) IsInterfaceNil() bool { + return cache == nil +} diff --git a/storage/txcache/txCache_test.go b/storage/txcache/txCache_test.go index fc4d01ef5f2..f3453a00742 100644 --- a/storage/txcache/txCache_test.go +++ b/storage/txcache/txCache_test.go @@ -7,44 +7,100 @@ import ( "testing" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/check" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/transaction" - "github.com/stretchr/testify/assert" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/stretchr/testify/require" ) func Test_AddTx(t *testing.T) { - cache := NewTxCache(4) + cache := NewTxCache(1) - txHash := []byte("hash-1") tx := createTx("alice", 1) - cache.AddTx(txHash, tx) - foundTx, ok := cache.GetByTxHash(txHash) + ok, added := cache.AddTx([]byte("hash-1"), tx) + require.True(t, ok) + require.True(t, added) + + // Add it again (no-operation) + ok, added = cache.AddTx([]byte("hash-1"), tx) + require.True(t, ok) + require.False(t, added) - assert.True(t, ok) - assert.Equal(t, tx, foundTx) + foundTx, ok := cache.GetByTxHash([]byte("hash-1")) + require.True(t, ok) + require.Equal(t, tx, foundTx) +} + +func Test_AddNilTx_DoesNothing(t *testing.T) { + cache := NewTxCache(1) + + txHash := []byte("hash-1") + + ok, added := cache.AddTx(txHash, nil) + require.False(t, ok) + require.False(t, added) + + foundTx, ok := cache.GetByTxHash(txHash) + require.False(t, ok) + require.Nil(t, foundTx) } func Test_RemoveByTxHash(t *testing.T) { cache := NewTxCache(16) + cache.AddTx([]byte("hash-1"), createTx("alice", 1)) + cache.AddTx([]byte("hash-2"), createTx("alice", 2)) + + err := cache.RemoveTxByHash([]byte("hash-1")) + require.Nil(t, err) + cache.Remove([]byte("hash-2")) + + foundTx, ok := cache.GetByTxHash([]byte("hash-1")) + require.False(t, ok) + require.Nil(t, foundTx) + + foundTx, ok = cache.GetByTxHash([]byte("hash-2")) + require.False(t, ok) + require.Nil(t, foundTx) +} + +func Test_CountTx_And_Len(t *testing.T) { + cache := NewTxCache(1) + + cache.AddTx([]byte("hash-1"), createTx("alice", 1)) + cache.AddTx([]byte("hash-2"), createTx("alice", 2)) + cache.AddTx([]byte("hash-3"), createTx("alice", 3)) + + require.Equal(t, int64(3), cache.CountTx()) + require.Equal(t, int(3), cache.Len()) +} + +func Test_GetByTxHash_And_Peek_And_Get(t *testing.T) { + cache := NewTxCache(1) + txHash := []byte("hash-1") tx := createTx("alice", 1) - cache.AddTx(txHash, tx) - err := cache.RemoveTxByHash(txHash) - assert.Nil(t, err) foundTx, ok := cache.GetByTxHash(txHash) + require.True(t, ok) + require.Equal(t, tx, foundTx) - assert.False(t, ok) - assert.Nil(t, foundTx) + foundTxPeek, okPeek := cache.Peek(txHash) + require.True(t, okPeek) + require.Equal(t, tx, foundTxPeek) + + foundTxGet, okGet := cache.Get(txHash) + require.True(t, okGet) + require.Equal(t, tx, foundTxGet) } func Test_RemoveByTxHash_Error_WhenMissing(t *testing.T) { cache := NewTxCache(16) err := cache.RemoveTxByHash([]byte("missing")) - assert.Equal(t, err, ErrTxNotFound) + require.Equal(t, err, ErrTxNotFound) } func Test_RemoveByTxHash_Error_WhenMapsInconsistency(t *testing.T) { @@ -58,7 +114,32 @@ func Test_RemoveByTxHash_Error_WhenMapsInconsistency(t *testing.T) { cache.txListBySender.removeTx(tx) err := cache.RemoveTxByHash(txHash) - assert.Equal(t, err, ErrMapsSyncInconsistency) + require.Equal(t, err, ErrMapsSyncInconsistency) +} + +func Test_Clear(t 
*testing.T) { + cache := NewTxCache(1) + + cache.AddTx([]byte("hash-alice-1"), createTx("alice", 1)) + cache.AddTx([]byte("hash-bob-7"), createTx("bob", 7)) + cache.AddTx([]byte("hash-alice-42"), createTx("alice", 42)) + require.Equal(t, int64(3), cache.CountTx()) + + cache.Clear() + require.Equal(t, int64(0), cache.CountTx()) +} + +func Test_ForEachTransaction(t *testing.T) { + cache := NewTxCache(1) + + cache.AddTx([]byte("hash-alice-1"), createTx("alice", 1)) + cache.AddTx([]byte("hash-bob-7"), createTx("bob", 7)) + + counter := 0 + cache.ForEachTransaction(func(txHash []byte, value data.TransactionHandler) { + counter++ + }) + require.Equal(t, 2, counter) } func Test_GetTransactions_Dummy(t *testing.T) { @@ -73,8 +154,8 @@ func Test_GetTransactions_Dummy(t *testing.T) { cache.AddTx([]byte("hash-bob-5"), createTx("bob", 5)) cache.AddTx([]byte("hash-carol-1"), createTx("carol", 1)) - sorted := cache.GetTransactions(10, 2) - assert.Len(t, sorted, 8) + sorted, _ := cache.GetTransactions(10, 2) + require.Len(t, sorted, 8) } func Test_GetTransactions(t *testing.T) { @@ -87,20 +168,20 @@ func Test_GetTransactions(t *testing.T) { nRequestedTransactions := math.MaxInt16 for senderTag := 0; senderTag < nSenders; senderTag++ { - sender := fmt.Sprintf("sender%d", senderTag) + sender := fmt.Sprintf("sender:%d", senderTag) for txNonce := nTransactionsPerSender; txNonce > 0; txNonce-- { - txHash := fmt.Sprintf("hash%d%d", senderTag, txNonce) + txHash := fmt.Sprintf("hash:%d:%d", senderTag, txNonce) tx := createTx(sender, uint64(txNonce)) cache.AddTx([]byte(txHash), tx) } } - assert.Equal(t, int64(nTotalTransactions), cache.CountTx()) + require.Equal(t, int64(nTotalTransactions), cache.CountTx()) - sorted := cache.GetTransactions(nRequestedTransactions, 2) + sorted, _ := cache.GetTransactions(nRequestedTransactions, 2) - assert.Len(t, sorted, core.MinInt(nRequestedTransactions, nTotalTransactions)) + require.Len(t, sorted, core.MinInt(nRequestedTransactions, nTotalTransactions)) // Check order nonces := make(map[string]uint64, nSenders) @@ -109,11 +190,27 @@ func Test_GetTransactions(t *testing.T) { sender := string(tx.GetSndAddress()) previousNonce := nonces[sender] - assert.LessOrEqual(t, previousNonce, nonce) + require.LessOrEqual(t, previousNonce, nonce) nonces[sender] = nonce } } +func Test_Keys(t *testing.T) { + cache := NewTxCache(16) + + cache.AddTx([]byte("alice-x"), createTx("alice", 42)) + cache.AddTx([]byte("alice-y"), createTx("alice", 43)) + cache.AddTx([]byte("bob-x"), createTx("bob", 42)) + cache.AddTx([]byte("bob-y"), createTx("bob", 43)) + + keys := cache.Keys() + require.Equal(t, 4, len(keys)) + require.Contains(t, keys, []byte("alice-x")) + require.Contains(t, keys, []byte("alice-y")) + require.Contains(t, keys, []byte("bob-x")) + require.Contains(t, keys, []byte("bob-y")) +} + func Test_AddWithEviction_UniformDistribution(t *testing.T) { config := EvictionConfig{ Enabled: true, @@ -126,12 +223,42 @@ func Test_AddWithEviction_UniformDistribution(t *testing.T) { // 5000 * 100 cache := NewTxCacheWithEviction(16, config) addManyTransactionsWithUniformDistribution(cache, 5000, 100) - assert.Equal(t, int64(240000), cache.CountTx()) + require.Equal(t, int64(240000), cache.CountTx()) // 1000 * 1000 cache = NewTxCacheWithEviction(16, config) addManyTransactionsWithUniformDistribution(cache, 1000, 1000) - assert.Equal(t, int64(240000), cache.CountTx()) + require.Equal(t, int64(240000), cache.CountTx()) +} + +func Test_NotImplementedFunctions(t *testing.T) { + cache := NewTxCache(1) + + 
evicted := cache.Put(nil, nil) + require.False(t, evicted) + + has := cache.Has(nil) + require.False(t, has) + + ok, evicted := cache.HasOrAdd(nil, nil) + require.False(t, ok) + require.False(t, evicted) + + require.NotPanics(t, func() { cache.RemoveOldest() }) + require.NotPanics(t, func() { cache.RegisterHandler(nil) }) + require.Zero(t, cache.MaxSize()) +} + +func Test_IsInterfaceNil(t *testing.T) { + cache := NewTxCache(1) + require.False(t, check.IfNil(cache)) + + makeNil := func() storage.Cacher { + return nil + } + + thisIsNil := makeNil() + require.True(t, check.IfNil(thisIsNil)) } // This seems to be the worst case in terms of eviction complexity @@ -147,7 +274,7 @@ func Benchmark_AddWithEviction_UniformDistribution_250000x1_WithConfig_NumOldest cache := NewTxCacheWithEviction(16, config) addManyTransactionsWithUniformDistribution(cache, 250000, 1) - assert.Equal(b, int64(240000), cache.CountTx()) + require.Equal(b, int64(240000), cache.CountTx()) } func Benchmark_AddWithEviction_UniformDistribution_250000x1_WithConfig_NumOldestSendersToEvict_100(b *testing.B) { @@ -161,7 +288,7 @@ func Benchmark_AddWithEviction_UniformDistribution_250000x1_WithConfig_NumOldest cache := NewTxCacheWithEviction(16, config) addManyTransactionsWithUniformDistribution(cache, 250000, 1) - assert.Equal(b, int64(240000), cache.CountTx()) + require.Equal(b, int64(240000), cache.CountTx()) } func Benchmark_AddWithEviction_UniformDistribution_250000x1_WithConfig_NumOldestSendersToEvict_1000(b *testing.B) { @@ -175,7 +302,7 @@ func Benchmark_AddWithEviction_UniformDistribution_250000x1_WithConfig_NumOldest cache := NewTxCacheWithEviction(16, config) addManyTransactionsWithUniformDistribution(cache, 250000, 1) - assert.Equal(b, int64(240000), cache.CountTx()) + require.Equal(b, int64(240000), cache.CountTx()) } func Benchmark_AddWithEviction_UniformDistribution_10x25000(b *testing.B) { @@ -189,7 +316,7 @@ func Benchmark_AddWithEviction_UniformDistribution_10x25000(b *testing.B) { cache := NewTxCacheWithEviction(16, config) addManyTransactionsWithUniformDistribution(cache, 10, 25000) - assert.Equal(b, int64(240000), cache.CountTx()) + require.Equal(b, int64(240000), cache.CountTx()) } func Benchmark_AddWithEviction_UniformDistribution_1x250000(b *testing.B) { @@ -203,7 +330,7 @@ func Benchmark_AddWithEviction_UniformDistribution_1x250000(b *testing.B) { cache := NewTxCacheWithEviction(16, config) addManyTransactionsWithUniformDistribution(cache, 1, 250000) - assert.Equal(b, int64(240000), cache.CountTx()) + require.Equal(b, int64(240000), cache.CountTx()) } func addManyTransactionsWithUniformDistribution(cache *TxCache, nSenders int, nTransactionsPerSender int) { diff --git a/storage/txcache/txListBySenderMap.go b/storage/txcache/txListBySenderMap.go index de8c0f59abd..7ee047f52a6 100644 --- a/storage/txcache/txListBySenderMap.go +++ b/storage/txcache/txListBySenderMap.go @@ -67,6 +67,7 @@ func (txMap *txListBySenderMap) removeTx(tx data.TransactionHandler) bool { listForSender, ok := txMap.getListForSender(sender) if !ok { + log.Error("txListBySenderMap.removeTx() detected inconsistency: sender of tx not in cache", "sender", sender) return false } @@ -103,12 +104,15 @@ func (txMap *txListBySenderMap) RemoveSendersBulk(senders []string) uint32 { // GetListsSortedByOrderNumber gets the list of sender addreses, sorted by the global order number func (txMap *txListBySenderMap) GetListsSortedByOrderNumber() []*txListForSender { - lists := make([]*txListForSender, txMap.counter.Get()) + counter := 
txMap.counter.Get()
+    if counter < 1 {
+        return make([]*txListForSender, 0)
+    }
+
+    lists := make([]*txListForSender, 0, counter)
-
-    index := 0
     txMap.backingMap.IterCb(func(key string, item interface{}) {
-        lists[index] = item.(*txListForSender)
-        index++
+        lists = append(lists, item.(*txListForSender))
     })
 
     sort.Slice(lists, func(i, j int) bool {
@@ -128,3 +132,8 @@ func (txMap *txListBySenderMap) forEach(function ForEachSender) {
         function(key, txList)
     })
 }
+
+func (txMap *txListBySenderMap) clear() {
+    txMap.backingMap.Clear()
+    txMap.counter.Set(0)
+}
diff --git a/storage/txcache/txListBySenderMap_test.go b/storage/txcache/txListBySenderMap_test.go
index 61e0539600c..e8adc2c9f65 100644
--- a/storage/txcache/txListBySenderMap_test.go
+++ b/storage/txcache/txListBySenderMap_test.go
@@ -1,9 +1,11 @@
 package txcache
 
 import (
+    "fmt"
+    "sync"
     "testing"
 
-    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
 )
 
 func Test_AddTx_IncrementsCounter(t *testing.T) {
@@ -14,7 +16,7 @@ func Test_AddTx_IncrementsCounter(t *testing.T) {
     myMap.addTx([]byte("b"), createTx("bob", uint64(1)))
 
     // There are 2 senders
-    assert.Equal(t, int64(2), myMap.counter.Get())
+    require.Equal(t, int64(2), myMap.counter.Get())
 }
 
 func Test_RemoveTx_AlsoRemovesSenderWhenNoTransactionLeft(t *testing.T) {
@@ -27,18 +29,32 @@ func Test_RemoveTx_AlsoRemovesSenderWhenNoTransactionLeft(t *testing.T) {
     myMap.addTx([]byte("a"), txAlice1)
     myMap.addTx([]byte("a"), txAlice2)
     myMap.addTx([]byte("b"), txBob)
-    assert.Equal(t, int64(2), myMap.counter.Get())
+    require.Equal(t, int64(2), myMap.counter.Get())
 
     myMap.removeTx(txAlice1)
-    assert.Equal(t, int64(2), myMap.counter.Get())
+    require.Equal(t, int64(2), myMap.counter.Get())
 
     myMap.removeTx(txAlice2)
     // All alice's transactions have been removed now
-    assert.Equal(t, int64(1), myMap.counter.Get())
+    require.Equal(t, int64(1), myMap.counter.Get())
 
     myMap.removeTx(txBob)
     // Also Bob has no more transactions
-    assert.Equal(t, int64(0), myMap.counter.Get())
+    require.Equal(t, int64(0), myMap.counter.Get())
+}
+
+func Test_RemoveSender(t *testing.T) {
+    myMap := newTxListBySenderMap(1)
+
+    myMap.addTx([]byte("a"), createTx("alice", uint64(1)))
+    require.Equal(t, int64(1), myMap.counter.Get())
+
+    // Bob is unknown
+    myMap.removeSender("bob")
+    require.Equal(t, int64(1), myMap.counter.Get())
+
+    myMap.removeSender("alice")
+    require.Equal(t, int64(0), myMap.counter.Get())
 }
 
 func Test_GetListsSortedByOrderNumber(t *testing.T) {
@@ -52,7 +68,42 @@ func Test_GetListsSortedByOrderNumber(t *testing.T) {
 
     lists := myMap.GetListsSortedByOrderNumber()
 
-    assert.Equal(t, "alice", lists[0].sender)
-    assert.Equal(t, "bob", lists[1].sender)
-    assert.Equal(t, "carol", lists[2].sender)
+    require.Equal(t, "alice", lists[0].sender)
+    require.Equal(t, "bob", lists[1].sender)
+    require.Equal(t, "carol", lists[2].sender)
+}
+
+func Test_GetListsSorted_NoPanic_IfAlsoConcurrentMutation(t *testing.T) {
+    myMap := newTxListBySenderMap(4)
+
+    for i := 0; i < 100; i++ {
+        sender := fmt.Sprintf("Sender-%d", i)
+        hash := createFakeTxHash([]byte(sender), 1)
+        myMap.addTx(hash, createTx(sender, uint64(1)))
+    }
+
+    var wg sync.WaitGroup
+
+    for i := 0; i < 1000; i++ {
+        wg.Add(2)
+
+        go func() {
+            for j := 0; j < 100; j++ {
+                myMap.GetListsSortedByOrderNumber()
+            }
+
+            wg.Done()
+        }()
+
+        go func() {
+            for j := 0; j < 1000; j++ {
+                sender := fmt.Sprintf("Sender-%d", j)
+                myMap.removeSender(sender)
+            }
+
+            wg.Done()
+        }()
+    }
+
+    wg.Wait()
 }
diff --git a/storage/txcache/txListForSender.go b/storage/txcache/txListForSender.go
index fb5e5be1a14..481e12706bd 100644
--- a/storage/txcache/txListForSender.go
+++ b/storage/txcache/txListForSender.go
@@ -130,7 +130,7 @@ func (listForSender *txListForSender) IsEmpty() bool {
 
 // copyBatchTo copies a batch (usually small) of transactions to a destination slice
 // It also updates the internal state used for copy operations
-func (listForSender *txListForSender) copyBatchTo(withReset bool, destination []data.TransactionHandler, batchSize int) int {
+func (listForSender *txListForSender) copyBatchTo(withReset bool, destination []data.TransactionHandler, destinationHashes [][]byte, batchSize int) int {
     // We can't read from multiple goroutines at the same time
     // And we can't mutate the sender's list while reading it
     listForSender.mutex.Lock()
@@ -156,6 +156,7 @@ func (listForSender *txListForSender) copyBatchTo(withReset bool, destination []
 
         value := element.Value.(txListForSenderNode)
         destination[copied] = value.tx
+        destinationHashes[copied] = value.txHash
         element = element.Next()
     }
 
diff --git a/storage/txcache/txListForSender_test.go b/storage/txcache/txListForSender_test.go
index 35f564c2b11..f6fb50b9ba3 100644
--- a/storage/txcache/txListForSender_test.go
+++ b/storage/txcache/txListForSender_test.go
@@ -48,6 +48,15 @@ func Test_findTx(t *testing.T) {
     assert.Nil(t, noElementWithD)
 }
 
+func Test_findTx_CoverNonceComparisonOptimization(t *testing.T) {
+    list := newTxListForSender(".", 0)
+    list.AddTx([]byte("A"), createTx(".", 42))
+
+    // Find one with a lower nonce, not added to cache
+    noElement := list.findListElementWithTx(createTx(".", 41))
+    assert.Nil(t, noElement)
+}
+
 func Test_RemoveTransaction(t *testing.T) {
     list := newTxListForSender(".", 0)
     tx := createTx(".", 1)
@@ -109,24 +118,25 @@ func Test_CopyBatchTo(t *testing.T) {
     }
 
     destination := make([]data.TransactionHandler, 1000)
+    destinationHashes := make([][]byte, 1000)
 
     // First batch
-    copied := list.copyBatchTo(true, destination, 50)
+    copied := list.copyBatchTo(true, destination, destinationHashes, 50)
     assert.Equal(t, 50, copied)
     assert.NotNil(t, destination[49])
     assert.Nil(t, destination[50])
 
     // Second batch
-    copied = list.copyBatchTo(false, destination[50:], 50)
+    copied = list.copyBatchTo(false, destination[50:], destinationHashes[50:], 50)
     assert.Equal(t, 50, copied)
     assert.NotNil(t, destination[99])
 
     // No third batch
-    copied = list.copyBatchTo(false, destination, 50)
+    copied = list.copyBatchTo(false, destination, destinationHashes, 50)
     assert.Equal(t, 0, copied)
 
     // Restart copy
-    copied = list.copyBatchTo(true, destination, 12345)
+    copied = list.copyBatchTo(true, destination, destinationHashes, 12345)
     assert.Equal(t, 100, copied)
 }
 
@@ -139,11 +149,13 @@ func Test_CopyBatchTo_NoPanicWhenCornerCases(t *testing.T) {
 
     // When empty destination
     destination := make([]data.TransactionHandler, 0)
-    copied := list.copyBatchTo(true, destination, 10)
+    destinationHashes := make([][]byte, 0)
+    copied := list.copyBatchTo(true, destination, destinationHashes, 10)
     assert.Equal(t, 0, copied)
 
     // When small destination
     destination = make([]data.TransactionHandler, 5)
-    copied = list.copyBatchTo(false, destination, 10)
+    destinationHashes = make([][]byte, 5)
+    copied = list.copyBatchTo(false, destination, destinationHashes, 10)
     assert.Equal(t, 5, copied)
 }
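Reviewer note (illustrative only, not part of the patch): a minimal sketch of how a caller might use the reworked TxCache API from the hunks above. The import paths, constructor, and method signatures come from the diff itself; the transaction field names (Nonce, SndAddr) are assumed from the test helpers, and all variable names are hypothetical.

package main

import (
    "fmt"

    "github.com/ElrondNetwork/elrond-go/data/transaction"
    "github.com/ElrondNetwork/elrond-go/storage/txcache"
)

func main() {
    // nChunksHint for the backing concurrent maps; 16 mirrors the values used in the tests.
    cache := txcache.NewTxCache(16)

    // AddTx now reports both validity ("ok") and whether the hash was newly inserted ("added").
    tx := &transaction.Transaction{Nonce: 1, SndAddr: []byte("alice")} // field names assumed
    ok, added := cache.AddTx([]byte("hash-alice-1"), tx)
    fmt.Println(ok, added) // true, true on first insertion; true, false for a duplicate hash

    // GetTransactions now returns the selected transactions together with their hashes,
    // so callers no longer need a reverse lookup when building a miniblock.
    txs, hashes := cache.GetTransactions(100, 10)
    for i := range txs {
        fmt.Printf("%x from %s\n", hashes[i], string(txs[i].GetSndAddress()))
    }

    // Part of the storage.Cacher-style surface added by the patch.
    fmt.Println(cache.Len(), len(cache.Keys()))
    cache.Remove([]byte("hash-alice-1"))
    cache.Clear()
}

One detail worth flagging from GetTransactions: only the transactions slice is trimmed to resultFillIndex before being returned, while the hashes slice is returned at its full requested length, so callers should index it by the length of the first return value, as above.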