diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 2c3ae91382c..9786f4fe034 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -14,12 +14,12 @@ NodeDisplayName = "" [Explorer] - Enabled = false - IndexerURL = "http://localhost:9200" + Enabled = false + IndexerURL = "http://localhost:9200" [MiniBlocksStorage] [MiniBlocksStorage.Cache] - Size = 100 + Size = 300 Type = "LRU" [MiniBlocksStorage.DB] FilePath = "MiniBlocks" @@ -105,6 +105,17 @@ MaxBatchSize = 45000 MaxOpenFiles = 10 +[RewardTxStorage] + [RewardTxStorage.Cache] + Size = 10000 + Type = "LRU" + [RewardTxStorage.DB] + FilePath = "RewardTransactions" + Type = "LvlDBSerial" + BatchDelaySeconds = 15 + MaxBatchSize = 500 + MaxOpenFiles = 10 + [ShardHdrNonceHashStorage] [ShardHdrNonceHashStorage.Cache] Size = 1000 @@ -143,7 +154,7 @@ Type = "LRU" [TxBlockBodyDataPool] - Size = 100 + Size = 300 Type = "LRU" [StateBlockBodyDataPool] @@ -175,6 +186,10 @@ Size = 100000 Type = "LRU" +[RewardTransactionDataPool] + Size = 5000 + Type = "LRU" + [ShardHeadersDataPool] Size = 1000 Type = "LRU" @@ -218,6 +233,16 @@ MinTimeToWaitBetweenBroadcastsInSec = 20 MaxTimeToWaitBetweenBroadcastsInSec = 25 DurationInSecToConsiderUnresponsive = 60 + [Heartbeat.HeartbeatStorage] + [Heartbeat.HeartbeatStorage.Cache] + Size = 100 + Type = "LRU" + [Heartbeat.HeartbeatStorage.DB] + FilePath = "HeartbeatStorage" + Type = "LvlDBSerial" + BatchDelaySeconds = 15 + MaxBatchSize = 300 + MaxOpenFiles = 10 # Consensus type which will be used (the current implementation can manage "bn" and "bls") # When consensus type is "bls" the multisig hasher type should be "blake2b" @@ -229,3 +254,5 @@ Port = 123 Timeout = 0 # Setting 0 means 'use default value' Version = 0 # Setting 0 means 'use default value' + + diff --git a/cmd/node/config/economics.toml b/cmd/node/config/economics.toml new file mode 100644 index 00000000000..183c7e19a4a --- /dev/null +++ b/cmd/node/config/economics.toml @@ -0,0 +1,14 @@ +#Econimics config of the node +[EconomicsAddresses] + CommunityAddress = "1bedf9f1db526aa98eb61f251e6eb29df64c0a4d96261b6fe9d4df1bc2cf5420" + BurnAddress = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" + +[RewardsSettings] + RewardsValue = "1000" + CommunityPercentage = 0.10 + LeaderPercentage = 0.50 + BurnPercentage = 0.40 + +[FeeSettings] + MinGasPrice = "0" + MinGasLimit = "5" diff --git a/cmd/node/config/nodesSetup.json b/cmd/node/config/nodesSetup.json index 1f0ee05446a..a655f12fdfb 100644 --- a/cmd/node/config/nodesSetup.json +++ b/cmd/node/config/nodesSetup.json @@ -8,67 +8,88 @@ "metaChainMinNodes": 1, "initialNodes": [ { - "pubkey": "5e91c426c5c8f5f805f86de1e0653e2ec33853772e583b88e9f0f201089d03d8570759c3c3ab610ce573493c33ba0adf954c8939dba5d5ef7f2be4e87145d8153fc5b4fb91cecb8d9b1f62e080743fbf69c8c3096bf07980bb82cb450ba9b902673373d5b671ea73620cc5bc4d36f7a0f5ca3684d4c8aa5c1b425ab2a8673140" + "pubkey": "5e91c426c5c8f5f805f86de1e0653e2ec33853772e583b88e9f0f201089d03d8570759c3c3ab610ce573493c33ba0adf954c8939dba5d5ef7f2be4e87145d8153fc5b4fb91cecb8d9b1f62e080743fbf69c8c3096bf07980bb82cb450ba9b902673373d5b671ea73620cc5bc4d36f7a0f5ca3684d4c8aa5c1b425ab2a8673140", + "address": "d4105de8e44aee9d4be670401cec546e5df381028e805012386a05acf76518d9" }, { - "pubkey": 
"73972bf46dca59fba211c58f11b530f8e9d6392c499655ce760abc6458fd9c6b54b9676ee4b95aa32f6c254c9aad2f63a6195cd65d837a4320d7b8e915ba3a7123c8f4983b201035573c0752bb54e9021eb383b40d302447b62ea7a3790c89c47f5ab81d183f414e87611a31ff635ad22e969495356d5bc44eec7917aaad4c5e" + "pubkey": "73972bf46dca59fba211c58f11b530f8e9d6392c499655ce760abc6458fd9c6b54b9676ee4b95aa32f6c254c9aad2f63a6195cd65d837a4320d7b8e915ba3a7123c8f4983b201035573c0752bb54e9021eb383b40d302447b62ea7a3790c89c47f5ab81d183f414e87611a31ff635ad22e969495356d5bc44eec7917aaad4c5e", + "address": "d11e60011ffc1b7ebb1fd4c92c2821ecef8bed5c518d76a24640153a462cdc1e" }, { - "pubkey": "7391ccce066ab5674304b10220643bc64829afa626a165f1e7a6618e260fa68f8e79018ac5964f7a1b8dd419645049042e34ebe7f2772def71e6176ce9daf50a57c17ee2a7445b908fe47e8f978380fcc2654a19925bf73db2402b09dde515148081f8ca7c331fbedec689de1b7bfce6bf106e4433557c29752c12d0a009f47a" + "pubkey": "7391ccce066ab5674304b10220643bc64829afa626a165f1e7a6618e260fa68f8e79018ac5964f7a1b8dd419645049042e34ebe7f2772def71e6176ce9daf50a57c17ee2a7445b908fe47e8f978380fcc2654a19925bf73db2402b09dde515148081f8ca7c331fbedec689de1b7bfce6bf106e4433557c29752c12d0a009f47a", + "address": "0f36a982b79d3c1fda9b82a646a2b423cb3e7223cffbae73a4e3d2c1ea62ee5e" }, { - "pubkey": "3efb714c90dd9442c939429687311a7d24e57005d2c6c80782092175b31786994b12f30e4689231e146647dc85be3f80dd458df813d602f11785793f4a8cd40901b48a64b8ebfb204496e48cadc48ad3aa422e511d8c9e6359f60d7067e55bfb134a658fad6d5a5d8fe051d770d74d82e11edcd7cc48b696e41f7244305b8895" + "pubkey": "3efb714c90dd9442c939429687311a7d24e57005d2c6c80782092175b31786994b12f30e4689231e146647dc85be3f80dd458df813d602f11785793f4a8cd40901b48a64b8ebfb204496e48cadc48ad3aa422e511d8c9e6359f60d7067e55bfb134a658fad6d5a5d8fe051d770d74d82e11edcd7cc48b696e41f7244305b8895", + "address": "8c93db70abe14a6aa8c4ca7b722b67f4342b4251c0f3731b12b5f75885a9b9b6" }, { - "pubkey": "5498d09d5cc1ef68e07b4fbd059ef3309ddfdaf26470514f80fd02cb9789a5772db6515e014efc9f49c8350be25b28c2938155e01e2270071265fef242574da512ef326d66a3113c6b697891e1390c18678bc2af7398863e18d002dab69fdd77819adca791e9528ae272466cd9f09d048fbac16ddb492ca30da9dc69662b1a58" + "pubkey": "5498d09d5cc1ef68e07b4fbd059ef3309ddfdaf26470514f80fd02cb9789a5772db6515e014efc9f49c8350be25b28c2938155e01e2270071265fef242574da512ef326d66a3113c6b697891e1390c18678bc2af7398863e18d002dab69fdd77819adca791e9528ae272466cd9f09d048fbac16ddb492ca30da9dc69662b1a58", + "address": "afb051dc3a1dfb029866730243c2cbc51d8b8ef15951e4da3929f9c8391f307a" }, { - "pubkey": "671a8df542bf8e3e6ddaa9a8ace6bf34b55f86aab4887fde28a2eb0b3dea53cf3b290fe9d5689c8c3dd99b91ce2da0df0636208022816d23f766756ea81cb46b5907f93c5b3071fec8fc88553dfd732f560537c66fc8507f750890abcf23e9900326939a163f4ffdaf1ee6109b7e86babee510613478857211149e80f33bd338" + "pubkey": "671a8df542bf8e3e6ddaa9a8ace6bf34b55f86aab4887fde28a2eb0b3dea53cf3b290fe9d5689c8c3dd99b91ce2da0df0636208022816d23f766756ea81cb46b5907f93c5b3071fec8fc88553dfd732f560537c66fc8507f750890abcf23e9900326939a163f4ffdaf1ee6109b7e86babee510613478857211149e80f33bd338", + "address": "86fe0a4a9bf7dbed6784b8cfbfd5a80d927be30b4debff67e60e1fd05cd2359b" }, { - "pubkey": "7feee0aa8ee11a61f4e91b71481928db7998a8e58deef181ffb013fa3e3c51a7375155c36deb9d09e97edd61dac26b1239c53c2adb50fe2608d467e8669fed9946465500e093442d399b30c74ebb38d1e979d435a5a2226b33e08f5050cc73b4799722a258dcf7e9d7a838014e06dc98ea691f976c0d319d7206b47e30549a37" + "pubkey": 
"7feee0aa8ee11a61f4e91b71481928db7998a8e58deef181ffb013fa3e3c51a7375155c36deb9d09e97edd61dac26b1239c53c2adb50fe2608d467e8669fed9946465500e093442d399b30c74ebb38d1e979d435a5a2226b33e08f5050cc73b4799722a258dcf7e9d7a838014e06dc98ea691f976c0d319d7206b47e30549a37", + "address": "fdc635bc2bf1477609bea5ba90365a99d4bbb023b2eaffb5c20642a2f2458dfa" }, { - "pubkey": "47cac956e48e385bd811fcfdb1a06bcf26bc09d4f4b4fbb2c64391c2bb6ab32975b6b7b4eb508c925ad6febff7031bd5ffbb3d7e7e02db94f25cbf50af4aee2201a42a404947f4ad6628b1482afadb4fbce34116961cd8e0edf0cdb017d37a7516177059bab03e70ce0ad445554c2f02cd00b183d4c2d4d37793441a0d36f867" + "pubkey": "47cac956e48e385bd811fcfdb1a06bcf26bc09d4f4b4fbb2c64391c2bb6ab32975b6b7b4eb508c925ad6febff7031bd5ffbb3d7e7e02db94f25cbf50af4aee2201a42a404947f4ad6628b1482afadb4fbce34116961cd8e0edf0cdb017d37a7516177059bab03e70ce0ad445554c2f02cd00b183d4c2d4d37793441a0d36f867", + "address": "5bdf4c81489bea69ba29cd3eea2670c1bb6cb5d922fa8cb6e17bca71dfdd49f0" }, { - "pubkey": "13fef1141f6f5c94b03b8597fecbaf800dc4d6128a5ffaa4465ee5036b4e471a292cbc3eea42ceeb1fe5be0e473c1d250a09451200610960564f464a11e3da1a75fdc13a3b108a0a30917726f99832bfe13874e07c5ea82d5a4b23249812b0e22dd81e29600d19a80e933123df3ac8d750192e136e007e80ac7a7a92c953f673" + "pubkey": "13fef1141f6f5c94b03b8597fecbaf800dc4d6128a5ffaa4465ee5036b4e471a292cbc3eea42ceeb1fe5be0e473c1d250a09451200610960564f464a11e3da1a75fdc13a3b108a0a30917726f99832bfe13874e07c5ea82d5a4b23249812b0e22dd81e29600d19a80e933123df3ac8d750192e136e007e80ac7a7a92c953f673", + "address": "22c2e3721a6256a5891ba612ad55343dceb6655388176f981ab2885ed756d6fd" }, { - "pubkey": "1e04f75417887f43a05b5cd2da0d31c0e451931cd2d145f80a08e9c85e3736ea499fa27ece987013a403e6a2595ef12d6d3c6634b6c72e438f96850b7336941c65642820c8dfa38fa8aa1813954832d4fdc42f87622bc5e1f9c51cbc45259cd84af3e89ec7452b38804cfa5260f7d7b97dbbc63e6c3b820d8768e01876af0846" + "pubkey": "1e04f75417887f43a05b5cd2da0d31c0e451931cd2d145f80a08e9c85e3736ea499fa27ece987013a403e6a2595ef12d6d3c6634b6c72e438f96850b7336941c65642820c8dfa38fa8aa1813954832d4fdc42f87622bc5e1f9c51cbc45259cd84af3e89ec7452b38804cfa5260f7d7b97dbbc63e6c3b820d8768e01876af0846", + "address": "f9c28a8369df5ff3f8589a0aaad93d2d8f94f5ad70d898d422c964fdd6a87d0b" }, { - "pubkey": "5466c7ed09d157bdd8b17389d84ca9fd1423eb347e40126840b5736bd3fb0aa52c2452cf7fa9f2f7b9cc53d414c482227036c056452fb8829bb78dd9849a0ed845e875412cba5f044d969ed819a186aa9841e77dae2f7a1c6c25bf73942bf0cd58e3d2d4f2b9117974e3d6b0743c1565d72c41b69ebbfce47bbcf8d642651d8d" + "pubkey": "5466c7ed09d157bdd8b17389d84ca9fd1423eb347e40126840b5736bd3fb0aa52c2452cf7fa9f2f7b9cc53d414c482227036c056452fb8829bb78dd9849a0ed845e875412cba5f044d969ed819a186aa9841e77dae2f7a1c6c25bf73942bf0cd58e3d2d4f2b9117974e3d6b0743c1565d72c41b69ebbfce47bbcf8d642651d8d", + "address": "69e34e6a9e6aeb051f46e15cae1fe7d0f8641b6bcd9ff23ab228c78b1e4418af" }, { - "pubkey": "713a6438056175e7b274e5dd8bffd34f5a266cd1554b837678552557940a7de46cc90d4139bb55d80f81adc1039b0bc723eed51eb3bc225b4cfcd5a91ccbbc373eba65495a57702293ac999bb7a4b6ca0135f67378b69a723e23cf9c45513b0387f6cb286d6e6d0ffaf2bdfcf0e6a28e3559402d830f70a2ed835304261b4321" + "pubkey": "713a6438056175e7b274e5dd8bffd34f5a266cd1554b837678552557940a7de46cc90d4139bb55d80f81adc1039b0bc723eed51eb3bc225b4cfcd5a91ccbbc373eba65495a57702293ac999bb7a4b6ca0135f67378b69a723e23cf9c45513b0387f6cb286d6e6d0ffaf2bdfcf0e6a28e3559402d830f70a2ed835304261b4321", + "address": "d453e66ea50b05ec3c102cdaabbcee172136f53db82ba434ca170a53483d4ad1" }, { - "pubkey": 
"1f4d1c336ca9758e08311a0b136f6ee6ad20bc8d9e276e508931892343ff8a0e056a96d598aff2f335b4cd98e1ba0902a22f36b86f8d104c0815a96a301df7c606e1c44413f019e0f175f4c6721587ddf620c98713927a7695b002d8bf36b7c04466c51ad43dd170e468bb7edd20b601cf13c1b53cc5384c07f9c61bf220910e" + "pubkey": "1f4d1c336ca9758e08311a0b136f6ee6ad20bc8d9e276e508931892343ff8a0e056a96d598aff2f335b4cd98e1ba0902a22f36b86f8d104c0815a96a301df7c606e1c44413f019e0f175f4c6721587ddf620c98713927a7695b002d8bf36b7c04466c51ad43dd170e468bb7edd20b601cf13c1b53cc5384c07f9c61bf220910e", + "address": "04e61f7bf892ca638451f6efeccf069d7fb5a5c82303aa27e6d28725da8ae1df" }, { - "pubkey": "2112a7a4468403b38d9d352fcf9fc1d1a20ddfbe4c1190a59a526a9460e6791f201589d5714adf4c390e156e204d21b2f2327d64255f4b94ff7dbe1acee47fe5352cece033a9e6e339a15ba094e73e0fbb2da49b29416b1017d61bd52884e0b22aab88a70047c64849d134c6af9fba69bbb2950a8fae3225aa7f462984efad3f" + "pubkey": "2112a7a4468403b38d9d352fcf9fc1d1a20ddfbe4c1190a59a526a9460e6791f201589d5714adf4c390e156e204d21b2f2327d64255f4b94ff7dbe1acee47fe5352cece033a9e6e339a15ba094e73e0fbb2da49b29416b1017d61bd52884e0b22aab88a70047c64849d134c6af9fba69bbb2950a8fae3225aa7f462984efad3f", + "address": "97d0f43b88e104aa9b0cc98c5cea96f5468a59d3986d2d187b19319a5911b7ff" }, { - "pubkey": "484f2fa2dab11d0f6276467090d5b33c077d13b61ee57834f481feec52423c3e8d83f4957153cad0e3baea68e6eb6e2cb26da69751c43024818cd4f0778219ac6637ddcb08f07528f9670e6f6da4ced010d7b3a2d3fdcf28b3455ef5644a7b7b170b5ebfc6b6d66d9e37fd58a7ecce98b047c01212fd7547bd4fb9f1f99372f4" + "pubkey": "484f2fa2dab11d0f6276467090d5b33c077d13b61ee57834f481feec52423c3e8d83f4957153cad0e3baea68e6eb6e2cb26da69751c43024818cd4f0778219ac6637ddcb08f07528f9670e6f6da4ced010d7b3a2d3fdcf28b3455ef5644a7b7b170b5ebfc6b6d66d9e37fd58a7ecce98b047c01212fd7547bd4fb9f1f99372f4", + "address": "8e660d69a8d99e9cb15323c0c8db36f1f432231a1b9a74da8ffa44a2b9abc7fe" }, { - "pubkey": "3bd6d27ae320fc07e19efb93b890fd8c869429fa891f97f93cdcb581fc3a085d162522eb79e6ae19f838d2cbabc3a497751c952e618976cfb763b807d3877036028ccc52f506b6ae2b92a82cf07de343af79790de61568e4f80eaa1934a67faa07dc140b0f02b39f510be929c2a7d097a7e0d0e828a5ed7d0e18a91d42543beb" + "pubkey": "3bd6d27ae320fc07e19efb93b890fd8c869429fa891f97f93cdcb581fc3a085d162522eb79e6ae19f838d2cbabc3a497751c952e618976cfb763b807d3877036028ccc52f506b6ae2b92a82cf07de343af79790de61568e4f80eaa1934a67faa07dc140b0f02b39f510be929c2a7d097a7e0d0e828a5ed7d0e18a91d42543beb", + "address": "a901ae67ca50d4af01f813da27613f124137be835a5d6902697ec719b2df704f" }, { - "pubkey": "7a2e2aabf1c030677921ce3d31fbeaa9eb4fdddfb97bd5714e351165f10d76b775ec01908e934711c4a2ab6c39be450fb5dd4390c30695563b6e679fa8a0e360561840c2dc3e39281077b5be7b1946806b92041cc0259be754ecd9e6a12a44bd301e1d380c3ae096acfae70e479b2d33b9be2cc993d03bb5517cd74584db3fca" + "pubkey": "7a2e2aabf1c030677921ce3d31fbeaa9eb4fdddfb97bd5714e351165f10d76b775ec01908e934711c4a2ab6c39be450fb5dd4390c30695563b6e679fa8a0e360561840c2dc3e39281077b5be7b1946806b92041cc0259be754ecd9e6a12a44bd301e1d380c3ae096acfae70e479b2d33b9be2cc993d03bb5517cd74584db3fca", + "address": "6b0dcc478115c270f2a6c6a9809c04b61eff8a5877b837d86810396fdb50feda" }, { - "pubkey": "306d6a4e09b88e5147fb475361db2f7b27ce4f2cae78a2dc7ced564a75043e5f84a9830eaa23137ac01ef8e4763fb6870bb62cf184596df8f15f41c535b2f6430a78957c29a9934533bf5df6014961879df399044d1cab57442ef36ef743ee02571495cc7a8f1dd9d573721131677759c532e62f946c9c969b5668862e817db6" + "pubkey": 
"306d6a4e09b88e5147fb475361db2f7b27ce4f2cae78a2dc7ced564a75043e5f84a9830eaa23137ac01ef8e4763fb6870bb62cf184596df8f15f41c535b2f6430a78957c29a9934533bf5df6014961879df399044d1cab57442ef36ef743ee02571495cc7a8f1dd9d573721131677759c532e62f946c9c969b5668862e817db6", + "address": "c53b7e4463091a999e002c75ed55c79e1f4c64e91ca8ba1b72d984dea9c0e477" }, { - "pubkey": "34404c84cf05c649a6f9c2bb3af33753ef0d186ba2363d5ed2892a4cf39f3f361f563dc66e5623a27a54c24edd417fa20c0f6361016652159b3a22d7c1ff5ef511ed0b04ee3ed101b2627ef64c5e6ee8b17c8a2db95ded5a9f7edf33520612c5269795ba1aec09bd178d185fe7e4d4360fdb3e51b484114fcb2cd9499fbc84a2" + "pubkey": "34404c84cf05c649a6f9c2bb3af33753ef0d186ba2363d5ed2892a4cf39f3f361f563dc66e5623a27a54c24edd417fa20c0f6361016652159b3a22d7c1ff5ef511ed0b04ee3ed101b2627ef64c5e6ee8b17c8a2db95ded5a9f7edf33520612c5269795ba1aec09bd178d185fe7e4d4360fdb3e51b484114fcb2cd9499fbc84a2", + "address": "18e6af48dad7fd4902991efb019e741e0f2a7a192c8678b1da3f4cf42c164519" }, { - "pubkey": "4bc468602245263f7366d7745c0d064aa311fbeb569751796e0d01878fc8723f45a67bfd1070fc8f90bc6ebb9f4e0c5024fda12e97ccaa52ea9f4e82673f29aa45e569a63ea929b4eb80cf421cb4e2b6f6a3b5d5216de2644bd6dcba4fa8a5cf7ab3ebadaeafcd6db8fc77f4168f2fa158f394916a9204dbc5760471ea8085bb" + "pubkey": "4bc468602245263f7366d7745c0d064aa311fbeb569751796e0d01878fc8723f45a67bfd1070fc8f90bc6ebb9f4e0c5024fda12e97ccaa52ea9f4e82673f29aa45e569a63ea929b4eb80cf421cb4e2b6f6a3b5d5216de2644bd6dcba4fa8a5cf7ab3ebadaeafcd6db8fc77f4168f2fa158f394916a9204dbc5760471ea8085bb", + "address": "95fe2d76c72ada51156aed96d083c993d637d7a772fb48efeb8bc3f3cedc7237" }, { - "pubkey": "85aa805512065ca85706a6ffe6e21ef635cb22ab862ab19a02a9572e6d14ad85794b2952a6e00cd87f43c657f006dc1dde45e04cddab85b2b5f20e70cb11f2045e7f94fe901353f8b75c0577f92e00b25e72a4790c7b391f33c0066fb38b2e66586706c06e159d342ecebd7f9bdfe83f3d3c7f395a7879096514d74c5d4e88aa" + "pubkey": "85aa805512065ca85706a6ffe6e21ef635cb22ab862ab19a02a9572e6d14ad85794b2952a6e00cd87f43c657f006dc1dde45e04cddab85b2b5f20e70cb11f2045e7f94fe901353f8b75c0577f92e00b25e72a4790c7b391f33c0066fb38b2e66586706c06e159d342ecebd7f9bdfe83f3d3c7f395a7879096514d74c5d4e88aa", + "address": "d6ad6476141dd798dc7b009b92b8c2d50a8caff8452a459548aa5ccb6c11b6c3" } ] -} \ No newline at end of file +} diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index ffa5e7f70b4..5d393f48de4 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -27,6 +27,7 @@ import ( "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" "github.com/ElrondNetwork/elrond-go/crypto/signing/multisig" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/address" dataBlock "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/blockchain" "github.com/ElrondNetwork/elrond-go/data/state" @@ -53,13 +54,15 @@ import ( "github.com/ElrondNetwork/elrond-go/p2p/loadBalancer" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block" + "github.com/ElrondNetwork/elrond-go/process/block/poolsCleaner" "github.com/ElrondNetwork/elrond-go/process/coordinator" + "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/factory/metachain" "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "github.com/ElrondNetwork/elrond-go/process/smartContract" processSync 
"github.com/ElrondNetwork/elrond-go/process/sync" - "github.com/ElrondNetwork/elrond-go/process/track" "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/statusHandler" @@ -89,8 +92,10 @@ const ( var log = logger.DefaultLogger() -//TODO: Extract all others error messages from this file in some defined errors +const maxTxNonceDeltaAllowed = 15000 + // ErrCreateForkDetector signals that a fork detector could not be created +//TODO: Extract all others error messages from this file in some defined errors var ErrCreateForkDetector = errors.New("could not create fork detector") // Network struct holds the network components of the Elrond protocol @@ -140,7 +145,6 @@ type Process struct { Rounder consensus.Rounder ForkDetector process.ForkDetector BlockProcessor process.BlockProcessor - BlockTracker process.BlocksTracker } type coreComponentsFactoryArgs struct { @@ -207,12 +211,16 @@ func NewStateComponentsFactoryArgs( // StateComponentsFactory creates the state components func StateComponentsFactory(args *stateComponentsFactoryArgs) (*State, error) { - addressConverter, err := addressConverters.NewPlainAddressConverter(args.config.Address.Length, args.config.Address.Prefix) + addressConverter, err := addressConverters.NewPlainAddressConverter( + args.config.Address.Length, + args.config.Address.Prefix, + ) + if err != nil { return nil, errors.New("could not create address converter: " + err.Error()) } - accountFactory, err := factoryState.NewAccountFactoryCreator(args.shardCoordinator) + accountFactory, err := factoryState.NewAccountFactoryCreator(factoryState.UserAccount) if err != nil { return nil, errors.New("could not create account factory: " + err.Error()) } @@ -242,7 +250,12 @@ type dataComponentsFactoryArgs struct { } // NewDataComponentsFactoryArgs initializes the arguments necessary for creating the data components -func NewDataComponentsFactoryArgs(config *config.Config, shardCoordinator sharding.Coordinator, core *Core, uniqueID string) *dataComponentsFactoryArgs { +func NewDataComponentsFactoryArgs( + config *config.Config, + shardCoordinator sharding.Coordinator, + core *Core, + uniqueID string, +) *dataComponentsFactoryArgs { return &dataComponentsFactoryArgs{ config: config, shardCoordinator: shardCoordinator, @@ -343,12 +356,12 @@ func CryptoComponentsFactory(args *cryptoComponentsFactoryArgs) (*Crypto, error) return nil, errors.New("could not create multisig hasher: " + err.Error()) } - currentShardPubKeys, err := args.nodesConfig.InitialNodesPubKeysForShard(args.shardCoordinator.SelfId()) + currentShardNodesPubKeys, err := args.nodesConfig.InitialNodesPubKeysForShard(args.shardCoordinator.SelfId()) if err != nil { return nil, errors.New("could not start creation of multiSigner: " + err.Error()) } - multiSigner, err := createMultiSigner(args.config, multisigHasher, currentShardPubKeys, args.privKey, args.keyGen) + multiSigner, err := createMultiSigner(args.config, multisigHasher, currentShardNodesPubKeys, args.privKey, args.keyGen) if err != nil { return nil, err } @@ -398,9 +411,11 @@ func NetworkComponentsFactory(p2pConfig *config.P2PConfig, log *logger.Logger, c type processComponentsFactoryArgs struct { genesisConfig *sharding.Genesis + economicsData *economics.EconomicsData nodesConfig *sharding.NodesSetup syncer ntp.SyncTimer shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator data *Data core *Core crypto *Crypto @@ -412,9 +427,11 @@ type 
processComponentsFactoryArgs struct { // NewProcessComponentsFactoryArgs initializes the arguments necessary for creating the process components func NewProcessComponentsFactoryArgs( genesisConfig *sharding.Genesis, + economicsData *economics.EconomicsData, nodesConfig *sharding.NodesSetup, syncer ntp.SyncTimer, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, data *Data, core *Core, crypto *Crypto, @@ -424,9 +441,11 @@ func NewProcessComponentsFactoryArgs( ) *processComponentsFactoryArgs { return &processComponentsFactoryArgs{ genesisConfig: genesisConfig, + economicsData: economicsData, nodesConfig: nodesConfig, syncer: syncer, shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, data: data, core: core, crypto: crypto, @@ -439,7 +458,14 @@ func NewProcessComponentsFactoryArgs( // ProcessComponentsFactory creates the process components func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, error) { interceptorContainerFactory, resolversContainerFactory, err := newInterceptorAndResolverContainerFactory( - args.shardCoordinator, args.data, args.core, args.crypto, args.state, args.network) + args.shardCoordinator, + args.nodesCoordinator, + args.data, args.core, + args.crypto, + args.state, + args.network, + args.economicsData, + ) if err != nil { return nil, err } @@ -490,9 +516,11 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err return nil, err } - blockProcessor, blockTracker, err := newBlockProcessorAndTracker( + blockProcessor, err := newBlockProcessor( resolversFinder, args.shardCoordinator, + args.nodesCoordinator, + args.economicsData, args.data, args.core, args.state, @@ -500,6 +528,7 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err shardsGenesisBlocks, args.coreServiceContainer, ) + if err != nil { return nil, err } @@ -510,7 +539,6 @@ func ProcessComponentsFactory(args *processComponentsFactoryArgs) (*Process, err Rounder: rounder, ForkDetector: forkDetector, BlockProcessor: blockProcessor, - BlockTracker: blockTracker, }, nil } @@ -583,25 +611,6 @@ func (srr *seedRandReader) Read(p []byte) (n int, err error) { return len(p), nil } -type nullChronologyValidator struct { -} - -// ValidateReceivedBlock should validate if parameters to be checked are valid -// In this implementation it just returns nil -func (*nullChronologyValidator) ValidateReceivedBlock(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - //TODO when implementing a workable variant take into account to receive headers "from future" (nonce or round > current round) - // as this might happen when clocks are slightly de-synchronized - return nil -} - -// IsInterfaceNil returns true if there is no value under the interface -func (ncv *nullChronologyValidator) IsInterfaceNil() bool { - if ncv == nil { - return true - } - return false -} - // CreateStatusHandlerPresenter will return an instance of PresenterStatusHandler func CreateStatusHandlerPresenter() view.Presenter { presenterStatusHandlerFactory := factoryViews.NewPresenterFactory() @@ -651,7 +660,13 @@ func getMarshalizerFromConfig(cfg *config.Config) (marshal.Marshalizer, error) { return nil, errors.New("no marshalizer provided in config file") } -func getTrie(cfg config.StorageConfig, marshalizer marshal.Marshalizer, hasher hashing.Hasher, uniqueID string) (data.Trie, error) { +func getTrie( + cfg config.StorageConfig, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + uniqueID string, +) 
(data.Trie, error) { + accountsTrieStorage, err := storageUnit.NewStorageUnitFromConf( getCacherFromConfig(cfg.Cache), getDBFromConfig(cfg.DB, uniqueID), @@ -725,7 +740,16 @@ func createShardDataStoreFromConfig( shardCoordinator sharding.Coordinator, uniqueID string, ) (dataRetriever.StorageService, error) { - var headerUnit, peerBlockUnit, miniBlockUnit, txUnit, metachainHeaderUnit, unsignedTxUnit, metaHdrHashNonceUnit, shardHdrHashNonceUnit *storageUnit.Unit + + var headerUnit *storageUnit.Unit + var peerBlockUnit *storageUnit.Unit + var miniBlockUnit *storageUnit.Unit + var txUnit *storageUnit.Unit + var metachainHeaderUnit *storageUnit.Unit + var unsignedTxUnit *storageUnit.Unit + var rewardTxUnit *storageUnit.Unit + var metaHdrHashNonceUnit *storageUnit.Unit + var shardHdrHashNonceUnit *storageUnit.Unit var err error defer func() { @@ -746,6 +770,9 @@ func createShardDataStoreFromConfig( if unsignedTxUnit != nil { _ = unsignedTxUnit.DestroyUnit() } + if rewardTxUnit != nil { + _ = rewardTxUnit.DestroyUnit() + } if metachainHeaderUnit != nil { _ = metachainHeaderUnit.DestroyUnit() } @@ -774,6 +801,14 @@ func createShardDataStoreFromConfig( return nil, err } + rewardTxUnit, err = storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.RewardTxStorage.Cache), + getDBFromConfig(config.RewardTxStorage.DB, uniqueID), + getBloomFromConfig(config.RewardTxStorage.Bloom)) + if err != nil { + return nil, err + } + miniBlockUnit, err = storageUnit.NewStorageUnitFromConf( getCacherFromConfig(config.MiniBlocksStorage.Cache), getDBFromConfig(config.MiniBlocksStorage.DB, uniqueID), @@ -825,6 +860,14 @@ func createShardDataStoreFromConfig( return nil, err } + heartbeatStorageUnit, err := storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.Heartbeat.HeartbeatStorage.Cache), + getDBFromConfig(config.Heartbeat.HeartbeatStorage.DB, uniqueID), + getBloomFromConfig(config.Heartbeat.HeartbeatStorage.Bloom)) + if err != nil { + return nil, err + } + store := dataRetriever.NewChainStorer() store.AddStorer(dataRetriever.TransactionUnit, txUnit) store.AddStorer(dataRetriever.MiniBlockUnit, miniBlockUnit) @@ -832,9 +875,11 @@ func createShardDataStoreFromConfig( store.AddStorer(dataRetriever.BlockHeaderUnit, headerUnit) store.AddStorer(dataRetriever.MetaBlockUnit, metachainHeaderUnit) store.AddStorer(dataRetriever.UnsignedTransactionUnit, unsignedTxUnit) + store.AddStorer(dataRetriever.RewardTransactionUnit, rewardTxUnit) store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, metaHdrHashNonceUnit) hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardCoordinator.SelfId()) store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnit) + store.AddStorer(dataRetriever.HeartbeatUnit, heartbeatStorageUnit) return store, err } @@ -928,6 +973,14 @@ func createMetaChainDataStoreFromConfig( } } + heartbeatStorageUnit, err := storageUnit.NewStorageUnitFromConf( + getCacherFromConfig(config.Heartbeat.HeartbeatStorage.Cache), + getDBFromConfig(config.Heartbeat.HeartbeatStorage.DB, uniqueID), + getBloomFromConfig(config.Heartbeat.HeartbeatStorage.Bloom)) + if err != nil { + return nil, err + } + store := dataRetriever.NewChainStorer() store.AddStorer(dataRetriever.MetaBlockUnit, metaBlockUnit) store.AddStorer(dataRetriever.MetaShardDataUnit, shardDataUnit) @@ -938,6 +991,7 @@ func createMetaChainDataStoreFromConfig( hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(i) store.AddStorer(hdrNonceHashDataUnit, 
shardHdrHashNonceUnits[i]) } + store.AddStorer(dataRetriever.HeartbeatUnit, heartbeatStorageUnit) return store, err } @@ -957,7 +1011,13 @@ func createShardDataPoolFromConfig( uTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.UnsignedTransactionDataPool)) if err != nil { - log.Info("error creating smart contract result") + log.Info("error creating smart contract result pool") + return nil, err + } + + rewardTxPool, err := shardedData.NewShardedData(getCacherFromConfig(config.RewardTransactionDataPool)) + if err != nil { + log.Info("error creating reward transaction pool") return nil, err } @@ -1004,6 +1064,7 @@ func createShardDataPoolFromConfig( return dataPool.NewShardedDataPool( txPool, uTxPool, + rewardTxPool, hdrPool, hdrNonces, txBlockBody, @@ -1138,17 +1199,36 @@ func createNetMessenger( func newInterceptorAndResolverContainerFactory( shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, data *Data, core *Core, crypto *Crypto, state *State, network *Network, + economics *economics.EconomicsData, ) (process.InterceptorsContainerFactory, dataRetriever.ResolversContainerFactory, error) { + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { - return newShardInterceptorAndResolverContainerFactory(shardCoordinator, data, core, crypto, state, network) + return newShardInterceptorAndResolverContainerFactory( + shardCoordinator, + nodesCoordinator, + data, + core, + crypto, + state, + network, + economics, + ) } if shardCoordinator.SelfId() == sharding.MetachainShardId { - return newMetaInterceptorAndResolverContainerFactory(shardCoordinator, data, core, crypto, network) + return newMetaInterceptorAndResolverContainerFactory( + shardCoordinator, + nodesCoordinator, + data, + core, + crypto, + network, + ) } return nil, nil, errors.New("could not create interceptor and resolver container factory") @@ -1156,16 +1236,19 @@ func newInterceptorAndResolverContainerFactory( func newShardInterceptorAndResolverContainerFactory( shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, data *Data, core *Core, crypto *Crypto, state *State, network *Network, + economics *economics.EconomicsData, ) (process.InterceptorsContainerFactory, dataRetriever.ResolversContainerFactory, error) { - //TODO add a real chronology validator and remove null chronology validator + interceptorContainerFactory, err := shard.NewInterceptorsContainerFactory( state.AccountsAdapter, shardCoordinator, + nodesCoordinator, network.NetMessenger, data.Store, core.Marshalizer, @@ -1175,7 +1258,8 @@ func newShardInterceptorAndResolverContainerFactory( crypto.MultiSigner, data.Datapool, state.AddressConverter, - &nullChronologyValidator{}, + maxTxNonceDeltaAllowed, + economics, ) if err != nil { return nil, nil, err @@ -1204,21 +1288,22 @@ func newShardInterceptorAndResolverContainerFactory( func newMetaInterceptorAndResolverContainerFactory( shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, data *Data, core *Core, crypto *Crypto, network *Network, ) (process.InterceptorsContainerFactory, dataRetriever.ResolversContainerFactory, error) { - //TODO add a real chronology validator and remove null chronology validator + interceptorContainerFactory, err := metachain.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, network.NetMessenger, data.Store, core.Marshalizer, core.Hasher, crypto.MultiSigner, data.MetaDatapool, - &nullChronologyValidator{}, ) if err != nil { return nil, nil, err @@ 
-1301,7 +1386,11 @@ func generateGenesisHeadersAndApplyInitialBalances( shardsGenesisBlocks[shardCoordinator.SelfId()] = genesisBlockForCurrentShard - genesisBlock, err := genesis.CreateMetaGenesisBlock(uint64(nodesSetup.StartTime), nodesSetup.InitialNodesPubKeys()) + genesisBlock, err := genesis.CreateMetaGenesisBlock( + uint64(nodesSetup.StartTime), + nodesSetup.InitialNodesPubKeys(), + ) + if err != nil { return nil, err } @@ -1344,7 +1433,7 @@ func createInMemoryShardCoordinatorAndAccount( return nil, nil, err } - accountFactory, err := factoryState.NewAccountFactoryCreator(newShardCoordinator) + accountFactory, err := factoryState.NewAccountFactoryCreator(factoryState.UserAccount) if err != nil { return nil, nil, err } @@ -1372,49 +1461,105 @@ func newForkDetector( return nil, ErrCreateForkDetector } -func newBlockProcessorAndTracker( +func newBlockProcessor( resolversFinder dataRetriever.ResolversFinder, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + economics *economics.EconomicsData, data *Data, core *Core, state *State, forkDetector process.ForkDetector, shardsGenesisBlocks map[uint32]data.HeaderHandler, coreServiceContainer serviceContainer.Core, -) (process.BlockProcessor, process.BlocksTracker, error) { +) (process.BlockProcessor, error) { + + communityAddr := economics.CommunityAddress() + burnAddr := economics.BurnAddress() + if communityAddr == "" || burnAddr == "" { + return nil, errors.New("rewards configuration missing") + } + + communityAddress, err := hex.DecodeString(communityAddr) + if err != nil { + return nil, err + } + + burnAddress, err := hex.DecodeString(burnAddr) + if err != nil { + return nil, err + } + + specialAddressHolder, err := address.NewSpecialAddressHolder( + communityAddress, + burnAddress, + state.AddressConverter, + shardCoordinator, + nodesCoordinator, + ) + if err != nil { + return nil, err + } + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { - return newShardBlockProcessorAndTracker(resolversFinder, shardCoordinator, data, core, state, forkDetector, shardsGenesisBlocks, coreServiceContainer) + return newShardBlockProcessor( + resolversFinder, + shardCoordinator, + nodesCoordinator, + specialAddressHolder, + data, + core, + state, + forkDetector, + shardsGenesisBlocks, + coreServiceContainer, + economics, + ) } if shardCoordinator.SelfId() == sharding.MetachainShardId { - return newMetaBlockProcessorAndTracker(resolversFinder, shardCoordinator, data, core, state, forkDetector, shardsGenesisBlocks, coreServiceContainer) + return newMetaBlockProcessor( + resolversFinder, + shardCoordinator, + nodesCoordinator, + specialAddressHolder, + data, + core, + state, + forkDetector, + shardsGenesisBlocks, + coreServiceContainer, + ) } - return nil, nil, errors.New("could not create block processor and tracker") + return nil, errors.New("could not create block processor and tracker") } -func newShardBlockProcessorAndTracker( +func newShardBlockProcessor( resolversFinder dataRetriever.ResolversFinder, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, data *Data, core *Core, state *State, forkDetector process.ForkDetector, shardsGenesisBlocks map[uint32]data.HeaderHandler, coreServiceContainer serviceContainer.Core, -) (process.BlockProcessor, process.BlocksTracker, error) { + economics *economics.EconomicsData, +) (process.BlockProcessor, error) { argsParser, err := smartContract.NewAtArgumentParser() if err != 
nil { - return nil, nil, err + return nil, err } vmFactory, err := shard.NewVMContainerFactory(state.AccountsAdapter, state.AddressConverter) if err != nil { - return nil, nil, err + return nil, err } vmContainer, err := vmFactory.Create() if err != nil { - return nil, nil, err + return nil, err } interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory( @@ -1422,20 +1567,38 @@ func newShardBlockProcessorAndTracker( core.Marshalizer, core.Hasher, state.AddressConverter, + specialAddressHandler, data.Store, + data.Datapool, + economics, ) if err != nil { - return nil, nil, err + return nil, err } interimProcContainer, err := interimProcFactory.Create() if err != nil { - return nil, nil, err + return nil, err } scForwarder, err := interimProcContainer.Get(dataBlock.SmartContractResultBlock) if err != nil { - return nil, nil, err + return nil, err + } + + rewardsTxInterim, err := interimProcContainer.Get(dataBlock.RewardsBlock) + if err != nil { + return nil, err + } + + rewardsTxHandler, ok := rewardsTxInterim.(process.TransactionFeeHandler) + if !ok { + return nil, process.ErrWrongTypeAssertion + } + + internalTransactionProducer, ok := rewardsTxInterim.(process.InternalTransactionProducer) + if !ok { + return nil, process.ErrWrongTypeAssertion } scProcessor, err := smartContract.NewSmartContractProcessor( @@ -1448,44 +1611,54 @@ func newShardBlockProcessorAndTracker( state.AddressConverter, shardCoordinator, scForwarder, + rewardsTxHandler, ) if err != nil { - return nil, nil, err + return nil, err } requestHandler, err := requestHandlers.NewShardResolverRequestHandler( resolversFinder, factory.TransactionTopic, factory.UnsignedTransactionTopic, + factory.RewardsTransactionTopic, factory.MiniBlocksTopic, factory.HeadersTopic, factory.MetachainBlocksTopic, MaxTxsToRequest, ) if err != nil { - return nil, nil, err + return nil, err } - transactionProcessor, err := transaction.NewTxProcessor( + rewardsTxProcessor, err := rewardTransaction.NewRewardTxProcessor( state.AccountsAdapter, - core.Hasher, state.AddressConverter, - core.Marshalizer, shardCoordinator, - scProcessor, + rewardsTxInterim, ) if err != nil { - return nil, nil, errors.New("could not create transaction processor: " + err.Error()) + return nil, err } - blockTracker, err := track.NewShardBlockTracker( - data.Datapool, + txTypeHandler, err := coordinator.NewTxTypeHandler(state.AddressConverter, shardCoordinator, state.AccountsAdapter) + if err != nil { + return nil, err + } + + transactionProcessor, err := transaction.NewTxProcessor( + state.AccountsAdapter, + core.Hasher, + state.AddressConverter, core.Marshalizer, shardCoordinator, - data.Store, + scProcessor, + rewardsTxHandler, + txTypeHandler, + economics, ) if err != nil { - return nil, nil, err + return nil, errors.New("could not create transaction processor: " + err.Error()) } preProcFactory, err := shard.NewPreProcessorsContainerFactory( @@ -1500,14 +1673,17 @@ func newShardBlockProcessorAndTracker( transactionProcessor, scProcessor, scProcessor, + rewardsTxProcessor, + internalTransactionProducer, + economics, ) if err != nil { - return nil, nil, err + return nil, err } preProcContainer, err := preProcFactory.Create() if err != nil { - return nil, nil, err + return nil, err } txCoordinator, err := coordinator.NewTransactionCoordinator( @@ -1519,88 +1695,105 @@ func newShardBlockProcessorAndTracker( interimProcContainer, ) if err != nil { - return nil, nil, err + return nil, err + } + + txPoolsCleaner, err := poolsCleaner.NewTxsPoolsCleaner( + 
state.AccountsAdapter, + shardCoordinator, + data.Datapool, + state.AddressConverter, + ) + if err != nil { + return nil, err } argumentsBaseProcessor := block.ArgBaseProcessor{ - Accounts: state.AccountsAdapter, - ForkDetector: forkDetector, - Hasher: core.Hasher, - Marshalizer: core.Marshalizer, - Store: data.Store, - ShardCoordinator: shardCoordinator, - Uint64Converter: core.Uint64ByteSliceConverter, - StartHeaders: shardsGenesisBlocks, - RequestHandler: requestHandler, - Core: coreServiceContainer, + Accounts: state.AccountsAdapter, + ForkDetector: forkDetector, + Hasher: core.Hasher, + Marshalizer: core.Marshalizer, + Store: data.Store, + ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, + SpecialAddressHandler: specialAddressHandler, + Uint64Converter: core.Uint64ByteSliceConverter, + StartHeaders: shardsGenesisBlocks, + RequestHandler: requestHandler, + Core: coreServiceContainer, } arguments := block.ArgShardProcessor{ - ArgBaseProcessor: &argumentsBaseProcessor, + ArgBaseProcessor: argumentsBaseProcessor, DataPool: data.Datapool, - BlocksTracker: blockTracker, TxCoordinator: txCoordinator, + TxsPoolsCleaner: txPoolsCleaner, } blockProcessor, err := block.NewShardProcessor(arguments) if err != nil { - return nil, nil, errors.New("could not create block processor: " + err.Error()) + return nil, errors.New("could not create block processor: " + err.Error()) } err = blockProcessor.SetAppStatusHandler(core.StatusHandler) if err != nil { - return nil, nil, err + return nil, err } - return blockProcessor, blockTracker, nil + return blockProcessor, nil } -func newMetaBlockProcessorAndTracker( +func newMetaBlockProcessor( resolversFinder dataRetriever.ResolversFinder, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, + specialAddressHandler process.SpecialAddressHandler, data *Data, core *Core, state *State, forkDetector process.ForkDetector, shardsGenesisBlocks map[uint32]data.HeaderHandler, coreServiceContainer serviceContainer.Core, -) (process.BlockProcessor, process.BlocksTracker, error) { +) (process.BlockProcessor, error) { requestHandler, err := requestHandlers.NewMetaResolverRequestHandler( resolversFinder, factory.ShardHeadersForMetachainTopic, factory.MetachainBlocksTopic) if err != nil { - return nil, nil, err + return nil, err } - blockTracker, err := track.NewMetaBlockTracker() - if err != nil { - return nil, nil, err + argumentsBaseProcessor := block.ArgBaseProcessor{ + Accounts: state.AccountsAdapter, + ForkDetector: forkDetector, + Hasher: core.Hasher, + Marshalizer: core.Marshalizer, + Store: data.Store, + ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, + SpecialAddressHandler: specialAddressHandler, + Uint64Converter: core.Uint64ByteSliceConverter, + StartHeaders: shardsGenesisBlocks, + RequestHandler: requestHandler, + Core: coreServiceContainer, + } + arguments := block.ArgMetaProcessor{ + ArgBaseProcessor: argumentsBaseProcessor, + DataPool: data.MetaDatapool, } - metaProcessor, err := block.NewMetaProcessor( - coreServiceContainer, - state.AccountsAdapter, - data.MetaDatapool, - forkDetector, - shardCoordinator, - core.Hasher, - core.Marshalizer, - data.Store, - shardsGenesisBlocks, - requestHandler, - core.Uint64ByteSliceConverter, - ) + metaProcessor, err := block.NewMetaProcessor(arguments) if err != nil { - return nil, nil, errors.New("could not create block processor: " + err.Error()) + return nil, errors.New("could not create block processor: " + err.Error()) } err = 
metaProcessor.SetAppStatusHandler(core.StatusHandler) if err != nil { - return nil, nil, err + return nil, err } - return metaProcessor, blockTracker, nil + return metaProcessor, nil } + func getCacherFromConfig(cfg config.CacheConfig) storageUnit.CacheConfig { return storageUnit.CacheConfig{ Size: cfg.Size, @@ -1698,7 +1891,14 @@ func decodeAddress(address string) ([]byte, error) { return hex.DecodeString(address) } -func getSk(ctx *cli.Context, log *logger.Logger, skName string, skIndexName string, skPemFileName string) ([]byte, error) { +func getSk( + ctx *cli.Context, + log *logger.Logger, + skName string, + skIndexName string, + skPemFileName string, +) ([]byte, error) { + //if flag is defined, it shall overwrite what was read from pem file if ctx.GlobalIsSet(skName) { encodedSk := []byte(ctx.GlobalString(skName)) diff --git a/cmd/node/main.go b/cmd/node/main.go index 1e666436696..b84645fe5bb 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -7,6 +7,7 @@ import ( "io" "io/ioutil" "math" + "math/big" "net/http" "os" "os/signal" @@ -37,13 +38,14 @@ import ( "github.com/ElrondNetwork/elrond-go/node" "github.com/ElrondNetwork/elrond-go/node/external" "github.com/ElrondNetwork/elrond-go/ntp" + "github.com/ElrondNetwork/elrond-go/process/economics" factoryVM "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/statusHandler" factoryViews "github.com/ElrondNetwork/elrond-go/statusHandler/factory" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "github.com/ElrondNetwork/elrond-vm-common" "github.com/ElrondNetwork/elrond-vm/iele/elrond/node/endpoint" "github.com/google/gops/agent" "github.com/urfave/cli" @@ -106,6 +108,13 @@ VERSION: Usage: "The main configuration file to load", Value: "./config/config.toml", } + // configurationEconomicsFile defines a flag for the path to the economics toml configuration file + configurationEconomicsFile = cli.StringFlag{ + Name: "configEconomics", + Usage: "The economics configuration file to load", + Value: "./config/economics.toml", + } + // p2pConfigurationFile defines a flag for the path to the toml file containing P2P configuration p2pConfigurationFile = cli.StringFlag{ Name: "p2pconfig", @@ -288,6 +297,7 @@ func main() { nodesFile, port, configurationFile, + configurationEconomicsFile, p2pConfigurationFile, txSignSk, sk, @@ -364,6 +374,13 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { } log.Info(fmt.Sprintf("Initialized with config from: %s", configurationFileName)) + configurationEconomicsFileName := ctx.GlobalString(configurationEconomicsFile.Name) + economicsConfig, err := loadEconomicsConfig(configurationEconomicsFileName, log) + if err != nil { + return err + } + log.Info(fmt.Sprintf("Initialized with config economics from: %s", configurationEconomicsFileName)) + p2pConfigurationFileName := ctx.GlobalString(p2pConfigurationFile.Name) p2pConfig, err := core.LoadP2PConfig(p2pConfigurationFileName) if err != nil { @@ -482,6 +499,15 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { return err } + nodesCoordinator, err := createNodesCoordinator( + nodesConfig, + generalConfig.GeneralSettings, + pubKey, + coreComponents.Hasher) + if err != nil { + return err + } + stateArgs := factory.NewStateComponentsFactoryArgs(generalConfig, genesisConfig, shardCoordinator, 
coreComponents) stateComponents, err := factory.StateComponentsFactory(stateArgs) if err != nil { @@ -551,13 +577,26 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { return err } - cryptoArgs := factory.NewCryptoComponentsFactoryArgs(ctx, generalConfig, nodesConfig, shardCoordinator, keyGen, - privKey, log, initialBalancesSkPemFile.Name, txSignSk.Name, txSignSkIndex.Name) + cryptoArgs := factory.NewCryptoComponentsFactoryArgs( + ctx, + generalConfig, + nodesConfig, + shardCoordinator, + keyGen, + privKey, + log, + initialBalancesSkPemFile.Name, + txSignSk.Name, + txSignSkIndex.Name, + ) cryptoComponents, err := factory.CryptoComponentsFactory(cryptoArgs) if err != nil { return err } + txSignPk := factory.GetPkEncoded(cryptoComponents.TxSignPubKey) + coreComponents.StatusHandler.SetStringValue(core.MetricPublicKeyTxSign, txSignPk) + sessionInfoFileOutput := fmt.Sprintf("%s:%s\n%s:%s\n%s:%s\n%s:%v\n%s:%s\n%s:%v\n", "PkBlockSign", factory.GetPkEncoded(pubKey), "PkAccount", factory.GetPkEncoded(cryptoComponents.TxSignPubKey), @@ -575,9 +614,6 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { } } - txSignPk := factory.GetPkEncoded(cryptoComponents.TxSignPubKey) - coreComponents.StatusHandler.SetStringValue(core.MetricPublicKeyTxSign, txSignPk) - err = ioutil.WriteFile(filepath.Join(logDirectory, "session.info"), []byte(sessionInfoFileOutput), os.ModePerm) log.LogIfError(err) @@ -611,13 +647,37 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { } } - processArgs := factory.NewProcessComponentsFactoryArgs(genesisConfig, nodesConfig, syncer, shardCoordinator, - dataComponents, coreComponents, cryptoComponents, stateComponents, networkComponents, coreServiceContainer) + economicsData, err := economics.NewEconomicsData(economicsConfig) + if err != nil { + return err + } + + processArgs := factory.NewProcessComponentsFactoryArgs( + genesisConfig, + economicsData, + nodesConfig, + syncer, + shardCoordinator, + nodesCoordinator, + dataComponents, + coreComponents, + cryptoComponents, + stateComponents, + networkComponents, + coreServiceContainer, + ) processComponents, err := factory.ProcessComponentsFactory(processArgs) if err != nil { return err } + var elasticIndexer indexer.Indexer + if coreServiceContainer == nil || coreServiceContainer.IsInterfaceNil() { + elasticIndexer = nil + } else { + elasticIndexer = coreServiceContainer.Indexer() + } + currentNode, err := createNode( generalConfig, nodesConfig, @@ -626,6 +686,7 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { privKey, pubKey, shardCoordinator, + nodesCoordinator, coreComponents, stateComponents, dataComponents, @@ -634,11 +695,16 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { networkComponents, uint64(ctx.GlobalUint(bootstrapRoundIndex.Name)), version, + elasticIndexer, ) if err != nil { return err } + if shardCoordinator.SelfId() == sharding.MetachainShardId { + indexValidatorsListIfNeeded(elasticIndexer, nodesCoordinator) + } + vmAccountsDB, err := hooks.NewVMAccountsDB( stateComponents.AccountsAdapter, stateComponents.AddressConverter, @@ -713,6 +779,18 @@ func startNode(ctx *cli.Context, log *logger.Logger, version string) error { return nil } +func indexValidatorsListIfNeeded(elasticIndexer indexer.Indexer, coordinator sharding.NodesCoordinator) { + if elasticIndexer == nil || elasticIndexer.IsInterfaceNil() { + return + } + + validatorsPubKeys := 
coordinator.GetAllValidatorsPublicKeys() + + if validatorsPubKeys != nil { + go elasticIndexer.SaveValidatorsPubKeys(validatorsPubKeys) + } +} + func initMetrics( appStatusHandler core.AppStatusHandler, pubKey crypto.PublicKey, @@ -739,7 +817,7 @@ func initMetrics( appStatusHandler.SetUInt64Value(core.MetricNumMiniBlocks, initUint) appStatusHandler.SetStringValue(core.MetricConsensusState, initString) appStatusHandler.SetStringValue(core.MetricConsensusRoundState, initString) - appStatusHandler.SetStringValue(core.MetricCrossCheckBlockHeight, initString) + appStatusHandler.SetStringValue(core.MetricCrossCheckBlockHeight, "0") appStatusHandler.SetUInt64Value(core.MetricIsSyncing, isSyncing) appStatusHandler.SetStringValue(core.MetricCurrentBlockHash, initString) appStatusHandler.SetUInt64Value(core.MetricNumProcessedTxs, initUint) @@ -749,6 +827,8 @@ func initMetrics( appStatusHandler.SetUInt64Value(core.MetricNumShardHeadersFromPool, initUint) appStatusHandler.SetUInt64Value(core.MetricNumShardHeadersProcessed, initUint) appStatusHandler.SetUInt64Value(core.MetricNumTimesInForkChoice, initUint) + appStatusHandler.SetStringValue(core.MetricPublicKeyTxSign, initString) + appStatusHandler.SetUInt64Value(core.MetricHighestFinalBlockInShard, initUint) } func startStatusPolling( @@ -948,23 +1028,40 @@ func loadMainConfig(filepath string, log *logger.Logger) (*config.Config, error) return cfg, nil } -func createShardCoordinator( - nodesConfig *sharding.NodesSetup, - pubKey crypto.PublicKey, - settingsConfig config.GeneralSettingsConfig, - log *logger.Logger, -) (sharding.Coordinator, core.NodeType, error) { +func loadEconomicsConfig(filepath string, log *logger.Logger) (*config.ConfigEconomics, error) { + cfg := &config.ConfigEconomics{} + err := core.LoadTomlFile(cfg, filepath, log) + if err != nil { + return nil, err + } + return cfg, nil +} +func getShardIdFromNodePubKey(pubKey crypto.PublicKey, nodesConfig *sharding.NodesSetup) (uint32, error) { if pubKey == nil { - return nil, "", errors.New("nil public key, could not create shard coordinator") + return 0, errors.New("nil public key") } publicKey, err := pubKey.ToByteArray() if err != nil { - return nil, "", err + return 0, err } selfShardId, err := nodesConfig.GetShardIDForPubKey(publicKey) + if err != nil { + return 0, err + } + + return selfShardId, err +} + +func createShardCoordinator( + nodesConfig *sharding.NodesSetup, + pubKey crypto.PublicKey, + settingsConfig config.GeneralSettingsConfig, + log *logger.Logger, +) (sharding.Coordinator, core.NodeType, error) { + selfShardId, err := getShardIdFromNodePubKey(pubKey, nodesConfig) nodeType := core.NodeTypeValidator if err == sharding.ErrPublicKeyNotFoundInGenesis { nodeType = core.NodeTypeObserver @@ -992,6 +1089,55 @@ func createShardCoordinator( return shardCoordinator, nodeType, nil } +func createNodesCoordinator( + nodesConfig *sharding.NodesSetup, + settingsConfig config.GeneralSettingsConfig, + pubKey crypto.PublicKey, + hasher hashing.Hasher, +) (sharding.NodesCoordinator, error) { + + shardId, err := getShardIdFromNodePubKey(pubKey, nodesConfig) + if err == sharding.ErrPublicKeyNotFoundInGenesis { + shardId, err = processDestinationShardAsObserver(settingsConfig) + } + if err != nil { + return nil, err + } + + nbShards := nodesConfig.NumberOfShards() + shardConsensusGroupSize := int(nodesConfig.ConsensusGroupSize) + metaConsensusGroupSize := int(nodesConfig.MetaChainConsensusGroupSize) + initNodesInfo := nodesConfig.InitialNodesInfo() + initValidators := 
make(map[uint32][]sharding.Validator) + + for shardId, nodeInfoList := range initNodesInfo { + validators := make([]sharding.Validator, 0) + for _, nodeInfo := range nodeInfoList { + validator, err := sharding.NewValidator(big.NewInt(0), 0, nodeInfo.PubKey(), nodeInfo.Address()) + if err != nil { + return nil, err + } + + validators = append(validators, validator) + } + initValidators[shardId] = validators + } + + nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator( + shardConsensusGroupSize, + metaConsensusGroupSize, + hasher, + shardId, + nbShards, + initValidators, + ) + if err != nil { + return nil, err + } + + return nodesCoordinator, nil +} + func processDestinationShardAsObserver(settingsConfig config.GeneralSettingsConfig) (uint32, error) { destShard := strings.ToLower(settingsConfig.DestinationShardAsObserver) if len(destShard) == 0 { @@ -1060,6 +1206,7 @@ func createNode( privKey crypto.PrivateKey, pubKey crypto.PublicKey, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, core *factory.Core, state *factory.State, data *factory.Data, @@ -1068,6 +1215,7 @@ func createNode( network *factory.Network, bootstrapRoundIndex uint64, version string, + indexer indexer.Indexer, ) (*node.Node, error) { consensusGroupSize, err := getConsensusGroupSize(nodesConfig, shardCoordinator) if err != nil { @@ -1087,10 +1235,10 @@ func createNode( node.WithConsensusGroupSize(int(consensusGroupSize)), node.WithSyncer(syncer), node.WithBlockProcessor(process.BlockProcessor), - node.WithBlockTracker(process.BlockTracker), node.WithGenesisTime(time.Unix(nodesConfig.StartTime, 0)), node.WithRounder(process.Rounder), node.WithShardCoordinator(shardCoordinator), + node.WithNodesCoordinator(nodesCoordinator), node.WithUint64ByteSliceConverter(core.Uint64ByteSliceConverter), node.WithSingleSigner(crypto.SingleSigner), node.WithMultiSigner(crypto.MultiSigner), @@ -1107,6 +1255,7 @@ func createNode( node.WithTxStorageSize(config.TxStorage.Cache.Size), node.WithBootstrapRoundIndex(bootstrapRoundIndex), node.WithAppStatusHandler(core.StatusHandler), + node.WithIndexer(indexer), ) if err != nil { return nil, errors.New("error creating node: " + err.Error()) diff --git a/config/config.go b/config/config.go index 876a45998c4..40249c12445 100644 --- a/config/config.go +++ b/config/config.go @@ -63,6 +63,7 @@ type Config struct { BlockHeaderStorage StorageConfig TxStorage StorageConfig UnsignedTransactionStorage StorageConfig + RewardTxStorage StorageConfig ShardHdrNonceHashStorage StorageConfig MetaHdrNonceHashStorage StorageConfig @@ -80,6 +81,7 @@ type Config struct { BlockHeaderNoncesDataPool CacheConfig TxDataPool CacheConfig UnsignedTransactionDataPool CacheConfig + RewardTransactionDataPool CacheConfig MetaBlockBodyDataPool CacheConfig MiniBlockHeaderHashesDataPool CacheConfig @@ -133,6 +135,7 @@ type HeartbeatConfig struct { MinTimeToWaitBetweenBroadcastsInSec int MaxTimeToWaitBetweenBroadcastsInSec int DurationInSecToConsiderUnresponsive int + HeartbeatStorage StorageConfig } // GeneralSettingsConfig will hold the general settings for a node diff --git a/config/economicsConfig.go b/config/economicsConfig.go new file mode 100644 index 00000000000..54c28875c7f --- /dev/null +++ b/config/economicsConfig.go @@ -0,0 +1,28 @@ +package config + +// EconomicsAddresses will hold economics addresses +type EconomicsAddresses struct { + CommunityAddress string + BurnAddress string +} + +// RewardsSettings will hold economics rewards settings +type RewardsSettings struct { + 
RewardsValue string + CommunityPercentage float64 + LeaderPercentage float64 + BurnPercentage float64 +} + +// FeeSettings will hold economics fee settings +type FeeSettings struct { + MinGasPrice string + MinGasLimit string +} + +// ConfigEconomics will hold economics config +type ConfigEconomics struct { + EconomicsAddresses EconomicsAddresses + RewardsSettings RewardsSettings + FeeSettings FeeSettings +} diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 460a36f97e2..a63f3c769bc 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -1,6 +1,7 @@ package config import ( + "fmt" "strconv" "testing" @@ -107,6 +108,7 @@ func TestTomlParser(t *testing.T) { [Consensus] Type = "` + consensusType + `" + ` cfg := Config{} @@ -115,3 +117,52 @@ func TestTomlParser(t *testing.T) { assert.Nil(t, err) assert.Equal(t, cfgExpected, cfg) } + +func TestTomlEconomicsParser(t *testing.T) { + communityAddress := "commAddr" + burnAddress := "burnAddr" + rewardsValue := "1000000000000000000000000000000000" + communityPercentage := 0.1 + leaderPercentage := 0.1 + burnPercentage := 0.8 + minGasPrice := "18446744073709551615" + minGasLimit := "18446744073709551615" + + cfgEconomicsExpected := ConfigEconomics{ + EconomicsAddresses: EconomicsAddresses{ + CommunityAddress: communityAddress, + BurnAddress: burnAddress, + }, + RewardsSettings: RewardsSettings{ + RewardsValue: rewardsValue, + CommunityPercentage: communityPercentage, + LeaderPercentage: leaderPercentage, + BurnPercentage: burnPercentage, + }, + FeeSettings: FeeSettings{ + MinGasPrice: minGasPrice, + MinGasLimit: minGasLimit, + }, + } + + testString := ` +[EconomicsAddresses] + CommunityAddress = "` + communityAddress + `" + BurnAddress = "` + burnAddress + `" +[RewardsSettings] + RewardsValue = "` + rewardsValue + `" + CommunityPercentage = ` + fmt.Sprintf("%.6f", communityPercentage) + ` + LeaderPercentage = ` + fmt.Sprintf("%.6f", leaderPercentage) + ` + BurnPercentage = ` + fmt.Sprintf("%.6f", burnPercentage) + ` +[FeeSettings] + MinGasPrice = "` + minGasPrice + `" + MinGasLimit = "` + minGasLimit + `" +` + + cfg := ConfigEconomics{} + + err := toml.Unmarshal([]byte(testString), &cfg) + + assert.Nil(t, err) + assert.Equal(t, cfgEconomicsExpected, cfg) +} diff --git a/consensus/broadcast/shardChainMessenger_test.go b/consensus/broadcast/shardChainMessenger_test.go index 7f9a959ba58..ab21755bd59 100644 --- a/consensus/broadcast/shardChainMessenger_test.go +++ b/consensus/broadcast/shardChainMessenger_test.go @@ -1,7 +1,6 @@ package broadcast_test import ( - "github.com/ElrondNetwork/elrond-go/process/factory" "testing" "time" @@ -9,6 +8,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/mock" "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/stretchr/testify/assert" ) diff --git a/consensus/interface.go b/consensus/interface.go index 55c7637db9c..94c29bac2ef 100644 --- a/consensus/interface.go +++ b/consensus/interface.go @@ -1,7 +1,6 @@ package consensus import ( - "math/big" "time" "github.com/ElrondNetwork/elrond-go/data" @@ -53,29 +52,6 @@ type SposFactory interface { IsInterfaceNil() bool } -// Validator defines what a consensus validator implementation should do. 
-type Validator interface { - Stake() *big.Int - Rating() int32 - PubKey() []byte - IsInterfaceNil() bool -} - -// ValidatorGroupSelector defines the behaviour of a struct able to do validator group selection -type ValidatorGroupSelector interface { - PublicKeysSelector - LoadEligibleList(eligibleList []Validator) error - ComputeValidatorsGroup(randomness []byte) (validatorsGroup []Validator, err error) - ConsensusGroupSize() int - SetConsensusGroupSize(int) error -} - -// PublicKeysSelector allows retrieval of eligible validators public keys selected by a bitmap -type PublicKeysSelector interface { - GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) - IsInterfaceNil() bool -} - // BroadcastMessenger defines the behaviour of the broadcast messages by the consensus group type BroadcastMessenger interface { BroadcastBlock(data.BodyHandler, data.HeaderHandler) error diff --git a/consensus/mock/blockProcessorMock.go b/consensus/mock/blockProcessorMock.go index 8129a3e29b7..d77ac805df7 100644 --- a/consensus/mock/blockProcessorMock.go +++ b/consensus/mock/blockProcessorMock.go @@ -38,7 +38,7 @@ func (blProcMock *BlockProcessorMock) RevertAccountState() { blProcMock.RevertAccountStateCalled() } -// CreateTxBlockBody mocks the creation of a transaction block body +// CreateBlockBody mocks the creation of a transaction block body func (blProcMock *BlockProcessorMock) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { return blProcMock.CreateBlockCalled(round, haveTime) } @@ -67,6 +67,9 @@ func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } +func (blProcMock BlockProcessorMock) SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) { +} + // IsInterfaceNil returns true if there is no value under the interface func (blProcMock *BlockProcessorMock) IsInterfaceNil() bool { if blProcMock == nil { diff --git a/consensus/mock/blocksTrackerMock.go b/consensus/mock/blocksTrackerMock.go deleted file mode 100644 index 864fadad627..00000000000 --- a/consensus/mock/blocksTrackerMock.go +++ /dev/null @@ -1,41 +0,0 @@ -package mock - -import ( - "github.com/ElrondNetwork/elrond-go/data" -) - -type BlocksTrackerMock struct { - UnnotarisedBlocksCalled func() []data.HeaderHandler - RemoveNotarisedBlocksCalled func(headerHandler data.HeaderHandler) error - AddBlockCalled func(headerHandler data.HeaderHandler) - SetBlockBroadcastRoundCalled func(nonce uint64, round int64) - BlockBroadcastRoundCalled func(nonce uint64) int64 -} - -func (btm *BlocksTrackerMock) UnnotarisedBlocks() []data.HeaderHandler { - return btm.UnnotarisedBlocksCalled() -} - -func (btm *BlocksTrackerMock) RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error { - return btm.RemoveNotarisedBlocksCalled(headerHandler) -} - -func (btm *BlocksTrackerMock) AddBlock(headerHandler data.HeaderHandler) { - btm.AddBlockCalled(headerHandler) -} - -func (btm *BlocksTrackerMock) SetBlockBroadcastRound(nonce uint64, round int64) { - btm.SetBlockBroadcastRoundCalled(nonce, round) -} - -func (btm *BlocksTrackerMock) BlockBroadcastRound(nonce uint64) int64 { - return btm.BlockBroadcastRoundCalled(nonce) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (btm *BlocksTrackerMock) IsInterfaceNil() bool { - if btm == nil { - return true - } - return false -} diff --git a/consensus/mock/consensusDataContainerMock.go b/consensus/mock/consensusDataContainerMock.go 
index 05f97e29f2f..107f20dd69a 100644 --- a/consensus/mock/consensusDataContainerMock.go +++ b/consensus/mock/consensusDataContainerMock.go @@ -14,7 +14,6 @@ import ( type ConsensusCoreMock struct { blockChain data.ChainHandler blockProcessor process.BlockProcessor - blocksTracker process.BlocksTracker bootstrapper process.Bootstrapper broadcastMessenger consensus.BroadcastMessenger chronologyHandler consensus.ChronologyHandler @@ -26,7 +25,7 @@ type ConsensusCoreMock struct { rounder consensus.Rounder shardCoordinator sharding.Coordinator syncTimer ntp.SyncTimer - validatorGroupSelector consensus.ValidatorGroupSelector + validatorGroupSelector sharding.NodesCoordinator } func (cdc *ConsensusCoreMock) Blockchain() data.ChainHandler { @@ -37,10 +36,6 @@ func (cdc *ConsensusCoreMock) BlockProcessor() process.BlockProcessor { return cdc.blockProcessor } -func (cdc *ConsensusCoreMock) BlocksTracker() process.BlocksTracker { - return cdc.blocksTracker -} - func (cdc *ConsensusCoreMock) BootStrapper() process.Bootstrapper { return cdc.bootstrapper } @@ -77,7 +72,7 @@ func (cdc *ConsensusCoreMock) SyncTimer() ntp.SyncTimer { return cdc.syncTimer } -func (cdc *ConsensusCoreMock) ValidatorGroupSelector() consensus.ValidatorGroupSelector { +func (cdc *ConsensusCoreMock) NodesCoordinator() sharding.NodesCoordinator { return cdc.validatorGroupSelector } @@ -124,7 +119,7 @@ func (cdc *ConsensusCoreMock) SetSyncTimer(syncTimer ntp.SyncTimer) { cdc.syncTimer = syncTimer } -func (cdc *ConsensusCoreMock) SetValidatorGroupSelector(validatorGroupSelector consensus.ValidatorGroupSelector) { +func (cdc *ConsensusCoreMock) SetValidatorGroupSelector(validatorGroupSelector sharding.NodesCoordinator) { cdc.validatorGroupSelector = validatorGroupSelector } diff --git a/consensus/mock/consensusStateMock.go b/consensus/mock/consensusStateMock.go index 84e17dee6ee..ae0a2562ed9 100644 --- a/consensus/mock/consensusStateMock.go +++ b/consensus/mock/consensusStateMock.go @@ -1,13 +1,16 @@ package mock -import "github.com/ElrondNetwork/elrond-go/consensus" +import ( + "github.com/ElrondNetwork/elrond-go/consensus" + "github.com/ElrondNetwork/elrond-go/sharding" +) type ConsensusStateMock struct { ResetConsensusStateCalled func() IsNodeLeaderInCurrentRoundCalled func(node string) bool IsSelfLeaderInCurrentRoundCalled func() bool GetLeaderCalled func() (string, error) - GetNextConsensusGroupCalled func(randomSource string, vgs consensus.ValidatorGroupSelector) ([]string, error) + GetNextConsensusGroupCalled func(randomSource string, vgs sharding.NodesCoordinator) ([]string, error) IsConsensusDataSetCalled func() bool IsConsensusDataEqualCalled func(data []byte) bool IsJobDoneCalled func(node string, currentSubroundId int) bool @@ -17,13 +20,12 @@ type ConsensusStateMock struct { IsBlockBodyAlreadyReceivedCalled func() bool IsHeaderAlreadyReceivedCalled func() bool CanDoSubroundJobCalled func(currentSubroundId int) bool - CanProcessReceivedMessageCalled func(cnsDta consensus.Message, currentRoundIndex int32, - currentSubroundId int) bool - GenerateBitmapCalled func(subroundId int) []byte - ProcessingBlockCalled func() bool - SetProcessingBlockCalled func(processingBlock bool) - ConsensusGroupSizeCalled func() int - SetThresholdCalled func(subroundId int, threshold int) + CanProcessReceivedMessageCalled func(cnsDta consensus.Message, currentRoundIndex int32, currentSubroundId int) bool + GenerateBitmapCalled func(subroundId int) []byte + ProcessingBlockCalled func() bool + SetProcessingBlockCalled func(processingBlock bool) + 
ConsensusGroupSizeCalled func() int + SetThresholdCalled func(subroundId int, threshold int) } func (cnsm *ConsensusStateMock) ResetConsensusState() { @@ -42,9 +44,10 @@ func (cnsm *ConsensusStateMock) GetLeader() (string, error) { return cnsm.GetLeaderCalled() } -func (cnsm *ConsensusStateMock) GetNextConsensusGroup(randomSource string, - vgs consensus.ValidatorGroupSelector) ([]string, - error) { +func (cnsm *ConsensusStateMock) GetNextConsensusGroup( + randomSource string, + vgs sharding.NodesCoordinator, +) ([]string, error) { return cnsm.GetNextConsensusGroupCalled(randomSource, vgs) } @@ -84,8 +87,11 @@ func (cnsm *ConsensusStateMock) CanDoSubroundJob(currentSubroundId int) bool { return cnsm.CanDoSubroundJobCalled(currentSubroundId) } -func (cnsm *ConsensusStateMock) CanProcessReceivedMessage(cnsDta consensus.Message, currentRoundIndex int32, - currentSubroundId int) bool { +func (cnsm *ConsensusStateMock) CanProcessReceivedMessage( + cnsDta consensus.Message, + currentRoundIndex int32, + currentSubroundId int, +) bool { return cnsm.CanProcessReceivedMessageCalled(cnsDta, currentRoundIndex, currentSubroundId) } diff --git a/consensus/mock/mockTestInitializer.go b/consensus/mock/mockTestInitializer.go index d355fae6784..a290fa5c40d 100644 --- a/consensus/mock/mockTestInitializer.go +++ b/consensus/mock/mockTestInitializer.go @@ -98,10 +98,6 @@ func InitConsensusCore() *ConsensusCoreMock { }, } blockProcessorMock := InitBlockProcessorMock() - blockTrackerMock := &BlocksTrackerMock{ - SetBlockBroadcastRoundCalled: func(nonce uint64, round int64) { - }, - } bootstrapperMock := &BootstrapperMock{} broadcastMessengerMock := &BroadcastMessengerMock{ BroadcastConsensusMessageCalled: func(message *consensus.Message) error { @@ -122,12 +118,11 @@ func InitConsensusCore() *ConsensusCoreMock { rounderMock := &RounderMock{} shardCoordinatorMock := ShardCoordinatorMock{} syncTimerMock := &SyncTimerMock{} - validatorGroupSelector := &ValidatorGroupSelectorMock{} + validatorGroupSelector := &NodesCoordinatorMock{} container := &ConsensusCoreMock{ blockChain, blockProcessorMock, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, chronologyHandlerMock, diff --git a/consensus/mock/nodesCoordinatorMock.go b/consensus/mock/nodesCoordinatorMock.go new file mode 100644 index 00000000000..fa0ce8502ee --- /dev/null +++ b/consensus/mock/nodesCoordinatorMock.go @@ -0,0 +1,115 @@ +package mock + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type NodesCoordinatorMock struct { + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) ([]sharding.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) +} + +func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( + randomness []byte, + round uint64, + shardId uint32, +) (validatorsGroup []sharding.Validator, err error) { + + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomness, round, shardId) + } + + list := []sharding.Validator{ + NewValidatorMock(big.NewInt(0), 0, []byte("A"), []byte("AA")), + NewValidatorMock(big.NewInt(0), 0, []byte("B"), []byte("BB")), + NewValidatorMock(big.NewInt(0), 0, []byte("C"), []byte("CC")), + NewValidatorMock(big.NewInt(0), 0, []byte("D"), []byte("DD")), + NewValidatorMock(big.NewInt(0), 0, []byte("E"), []byte("EE")), + 
NewValidatorMock(big.NewInt(0), 0, []byte("F"), []byte("FF")), + NewValidatorMock(big.NewInt(0), 0, []byte("G"), []byte("GG")), + NewValidatorMock(big.NewInt(0), 0, []byte("H"), []byte("HH")), + NewValidatorMock(big.NewInt(0), 0, []byte("I"), []byte("II")), + } + + return list, nil +} + +func (ncm *NodesCoordinatorMock) GetAllValidatorsPublicKeys() map[uint32][][]byte { + return nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsIndexes(publicKeys []string) []uint64 { + return nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + pubKeys := make([]string, 0) + + for _, v := range validators { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsRewardsAddressesCalled != nil { + return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + addresses := make([]string, 0) + for _, v := range validators { + addresses = append(addresses, string(v.Address())) + } + + return addresses, nil +} + +func (ncm *NodesCoordinatorMock) ConsensusGroupSize(shardId uint32) int { + panic("implement me") +} + +func (ncm *NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { + return nil +} + +func (ncm *NodesCoordinatorMock) SetConsensusGroupSize(int) error { + panic("implement me") +} + +func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { + panic("implement me") +} + +func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { + panic("implement me") +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ncm *NodesCoordinatorMock) IsInterfaceNil() bool { + if ncm == nil { + return true + } + return false +} diff --git a/consensus/mock/sposWorkerMock.go b/consensus/mock/sposWorkerMock.go index 5faa06ebc25..80b060ee107 100644 --- a/consensus/mock/sposWorkerMock.go +++ b/consensus/mock/sposWorkerMock.go @@ -19,7 +19,6 @@ type SposWorkerMock struct { GetBroadcastBlockCalled func(data.BodyHandler, data.HeaderHandler) error GetBroadcastHeaderCalled func(data.HeaderHandler) error ExecuteStoredMessagesCalled func() - BroadcastUnnotarisedBlocksCalled func() } func (sposWorkerMock *SposWorkerMock) AddReceivedMessageCall(messageType consensus.MessageType, @@ -55,10 +54,6 @@ func (sposWorkerMock *SposWorkerMock) ExecuteStoredMessages() { sposWorkerMock.ExecuteStoredMessagesCalled() } -func (sposWorkerMock *SposWorkerMock) BroadcastUnnotarisedBlocks() { - sposWorkerMock.BroadcastUnnotarisedBlocksCalled() -} - // IsInterfaceNil returns true if there is no value under the interface func (sposWorkerMock *SposWorkerMock) IsInterfaceNil() bool { if sposWorkerMock == nil { diff --git a/consensus/mock/validatorGroupSelectorMock.go b/consensus/mock/validatorGroupSelectorMock.go deleted file mode 100644 index 31ff0a70fd6..00000000000 --- a/consensus/mock/validatorGroupSelectorMock.go +++ /dev/null @@ 
-1,55 +0,0 @@ -package mock - -import ( - "math/big" - - "github.com/ElrondNetwork/elrond-go/consensus" -) - -type ValidatorGroupSelectorMock struct { - ComputeValidatorsGroupCalled func([]byte) ([]consensus.Validator, error) -} - -func (vgsm ValidatorGroupSelectorMock) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []consensus.Validator, err error) { - if vgsm.ComputeValidatorsGroupCalled != nil { - return vgsm.ComputeValidatorsGroupCalled(randomness) - } - - list := []consensus.Validator{ - NewValidatorMock(big.NewInt(0), 0, []byte("A")), - NewValidatorMock(big.NewInt(0), 0, []byte("B")), - NewValidatorMock(big.NewInt(0), 0, []byte("C")), - NewValidatorMock(big.NewInt(0), 0, []byte("D")), - NewValidatorMock(big.NewInt(0), 0, []byte("E")), - NewValidatorMock(big.NewInt(0), 0, []byte("F")), - NewValidatorMock(big.NewInt(0), 0, []byte("G")), - NewValidatorMock(big.NewInt(0), 0, []byte("H")), - NewValidatorMock(big.NewInt(0), 0, []byte("I")), - } - - return list, nil -} - -func (vgsm ValidatorGroupSelectorMock) ConsensusGroupSize() int { - panic("implement me") -} - -func (vgsm ValidatorGroupSelectorMock) LoadEligibleList(eligibleList []consensus.Validator) error { - return nil -} - -func (vgsm ValidatorGroupSelectorMock) SetConsensusGroupSize(int) error { - panic("implement me") -} - -func (vgsm ValidatorGroupSelectorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { - panic("implement me") -} - -// IsInterfaceNil returns true if there is no value under the interface -func (vgsm *ValidatorGroupSelectorMock) IsInterfaceNil() bool { - if vgsm == nil { - return true - } - return false -} diff --git a/consensus/mock/validatorMock.go b/consensus/mock/validatorMock.go index 517c49e6dcb..56621342a59 100644 --- a/consensus/mock/validatorMock.go +++ b/consensus/mock/validatorMock.go @@ -5,13 +5,14 @@ import ( ) type ValidatorMock struct { - stake *big.Int - rating int32 - pubKey []byte + stake *big.Int + rating int32 + pubKey []byte + address []byte } -func NewValidatorMock(stake *big.Int, rating int32, pubKey []byte) *ValidatorMock { - return &ValidatorMock{stake: stake, rating: rating, pubKey: pubKey} +func NewValidatorMock(stake *big.Int, rating int32, pubKey []byte, address []byte) *ValidatorMock { + return &ValidatorMock{stake: stake, rating: rating, pubKey: pubKey, address: address} } func (vm *ValidatorMock) Stake() *big.Int { @@ -26,6 +27,10 @@ func (vm *ValidatorMock) PubKey() []byte { return vm.pubKey } +func (vm *ValidatorMock) Address() []byte { + return vm.address +} + // IsInterfaceNil returns true if there is no value under the interface func (vm *ValidatorMock) IsInterfaceNil() bool { if vm == nil { diff --git a/consensus/spos/bls/blsSubroundsFactory.go b/consensus/spos/bls/blsSubroundsFactory.go index c6d455c0449..9d9a68f90fc 100644 --- a/consensus/spos/bls/blsSubroundsFactory.go +++ b/consensus/spos/bls/blsSubroundsFactory.go @@ -6,6 +6,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/consensus/spos/commonSubround" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/statusHandler" ) @@ -17,6 +18,7 @@ type factory struct { worker spos.WorkerHandler appStatusHandler core.AppStatusHandler + indexer indexer.Indexer } // NewSubroundsFactory creates a new consensusState object @@ -63,7 +65,7 @@ func checkNewFactoryParams( return nil } -// SetAppStatusHandler method set appStatusHandler +// SetAppStatusHandler method 
will update the value of the factory's appStatusHandler func (fct *factory) SetAppStatusHandler(ash core.AppStatusHandler) error { if ash == nil || ash.IsInterfaceNil() { return spos.ErrNilAppStatusHandler @@ -73,6 +75,11 @@ func (fct *factory) SetAppStatusHandler(ash core.AppStatusHandler) error { return nil } +// SetIndexer method will update the value of the factory's indexer +func (fct *factory) SetIndexer(indexer indexer.Indexer) { + fct.indexer = indexer +} + // GenerateSubrounds will generate the subrounds used in BLS Cns func (fct *factory) GenerateSubrounds() error { fct.initConsensusThreshold() @@ -129,7 +136,6 @@ func (fct *factory) generateStartRoundSubround() error { processingThresholdPercent, getSubroundName, fct.worker.ExecuteStoredMessages, - fct.worker.BroadcastUnnotarisedBlocks, ) if err != nil { return err @@ -140,6 +146,8 @@ func (fct *factory) generateStartRoundSubround() error { return err } + subroundStartRound.SetIndexer(fct.indexer) + fct.consensusCore.Chronology().AddSubround(subroundStartRound) return nil diff --git a/consensus/spos/bls/export_test.go b/consensus/spos/bls/export_test.go index de65b8dd80f..41c9fcbb471 100644 --- a/consensus/spos/bls/export_test.go +++ b/consensus/spos/bls/export_test.go @@ -60,8 +60,8 @@ func (fct *factory) SyncTimer() ntp.SyncTimer { return fct.consensusCore.SyncTimer() } -func (fct *factory) ValidatorGroupSelector() consensus.ValidatorGroupSelector { - return fct.consensusCore.ValidatorGroupSelector() +func (fct *factory) NodesCoordinator() sharding.NodesCoordinator { + return fct.consensusCore.NodesCoordinator() } func (fct *factory) Worker() spos.WorkerHandler { diff --git a/consensus/spos/bls/subroundEndRound.go b/consensus/spos/bls/subroundEndRound.go index d376d5c84b4..9d0b041e395 100644 --- a/consensus/spos/bls/subroundEndRound.go +++ b/consensus/spos/bls/subroundEndRound.go @@ -111,8 +111,6 @@ func (sr *subroundEndRound) doEndRoundJob() bool { log.Error(err.Error()) } - sr.BlocksTracker().SetBlockBroadcastRound(sr.Header.GetNonce(), sr.RoundIndex) - log.Info(fmt.Sprintf("%sStep 3: BlockBody and Header has been committed and broadcast\n", sr.SyncTimer().FormattedCurrentTime())) err = sr.broadcastMiniBlocksAndTransactions() diff --git a/consensus/spos/bn/bnSubroundsFactory.go b/consensus/spos/bn/bnSubroundsFactory.go index 18d2c4b3fdc..15f5c31c9d2 100644 --- a/consensus/spos/bn/bnSubroundsFactory.go +++ b/consensus/spos/bn/bnSubroundsFactory.go @@ -6,6 +6,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/consensus/spos/commonSubround" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/statusHandler" ) @@ -17,6 +18,7 @@ type factory struct { worker spos.WorkerHandler appStatusHandler core.AppStatusHandler + indexer indexer.Indexer } // NewSubroundsFactory creates a new factory for BN subrounds @@ -67,7 +69,7 @@ func checkNewFactoryParams( return nil } -// SetAppStatusHandler method set appStatusHandler +// SetAppStatusHandler method will update the value of the factory's appStatusHandler func (fct *factory) SetAppStatusHandler(ash core.AppStatusHandler) error { if ash == nil || ash.IsInterfaceNil() { return spos.ErrNilAppStatusHandler @@ -77,6 +79,11 @@ func (fct *factory) SetAppStatusHandler(ash core.AppStatusHandler) error { return nil } +// SetIndexer method will update the value of the factory's indexer +func (fct *factory) SetIndexer(indexer indexer.Indexer) { + fct.indexer = indexer +} + 
// GenerateSubrounds will generate the subrounds used in Belare & Naveen Cns func (fct *factory) GenerateSubrounds() error { fct.initConsensusThreshold() @@ -149,7 +156,6 @@ func (fct *factory) generateStartRoundSubround() error { processingThresholdPercent, getSubroundName, fct.worker.ExecuteStoredMessages, - fct.worker.BroadcastUnnotarisedBlocks, ) if err != nil { @@ -161,6 +167,8 @@ func (fct *factory) generateStartRoundSubround() error { return err } + subroundStartRound.SetIndexer(fct.indexer) + fct.consensusCore.Chronology().AddSubround(subroundStartRound) return nil diff --git a/consensus/spos/bn/export_test.go b/consensus/spos/bn/export_test.go index af3016b28f5..45e79808b5c 100644 --- a/consensus/spos/bn/export_test.go +++ b/consensus/spos/bn/export_test.go @@ -60,8 +60,8 @@ func (fct *factory) SyncTimer() ntp.SyncTimer { return fct.consensusCore.SyncTimer() } -func (fct *factory) ValidatorGroupSelector() consensus.ValidatorGroupSelector { - return fct.consensusCore.ValidatorGroupSelector() +func (fct *factory) NodesCoordinator() sharding.NodesCoordinator { + return fct.consensusCore.NodesCoordinator() } func (fct *factory) Worker() spos.WorkerHandler { diff --git a/consensus/spos/bn/subroundEndRound.go b/consensus/spos/bn/subroundEndRound.go index e055260bc07..8607ffd4158 100644 --- a/consensus/spos/bn/subroundEndRound.go +++ b/consensus/spos/bn/subroundEndRound.go @@ -106,8 +106,6 @@ func (sr *subroundEndRound) doEndRoundJob() bool { log.Error(err.Error()) } - sr.BlocksTracker().SetBlockBroadcastRound(sr.Header.GetNonce(), sr.RoundIndex) - log.Info(fmt.Sprintf("%sStep 6: TxBlockBody and Header has been committed and broadcast\n", sr.SyncTimer().FormattedCurrentTime())) err = sr.broadcastMiniBlocksAndTransactions() diff --git a/consensus/spos/commonSubround/base_test.go b/consensus/spos/commonSubround/base_test.go index bd2f4ab07f3..00eaa5c98e6 100644 --- a/consensus/spos/commonSubround/base_test.go +++ b/consensus/spos/commonSubround/base_test.go @@ -115,6 +115,3 @@ func getSubroundName(subroundId int) string { // executeStoredMessages tries to execute all the messages received which are valid for execution func executeStoredMessages() { } - -func broadcastUnnotarisedBlocks() { -} diff --git a/consensus/spos/commonSubround/subroundStartRound.go b/consensus/spos/commonSubround/subroundStartRound.go index 8f294fd50aa..886e3d48359 100644 --- a/consensus/spos/commonSubround/subroundStartRound.go +++ b/consensus/spos/commonSubround/subroundStartRound.go @@ -7,6 +7,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/core/logger" "github.com/ElrondNetwork/elrond-go/statusHandler" ) @@ -19,9 +20,9 @@ type SubroundStartRound struct { processingThresholdPercentage int getSubroundName func(subroundId int) string executeStoredMessages func() - broadcastUnnotarisedBlocks func() appStatusHandler core.AppStatusHandler + indexer indexer.Indexer } // NewSubroundStartRound creates a SubroundStartRound object @@ -31,11 +32,9 @@ func NewSubroundStartRound( processingThresholdPercentage int, getSubroundName func(subroundId int) string, executeStoredMessages func(), - broadcastUnnotarisedBlocks func(), ) (*SubroundStartRound, error) { err := checkNewSubroundStartRoundParams( baseSubround, - broadcastUnnotarisedBlocks, ) if err != nil { return nil, err @@ -46,8 +45,8 @@ func NewSubroundStartRound( processingThresholdPercentage, getSubroundName, 
executeStoredMessages, - broadcastUnnotarisedBlocks, statusHandler.NewNilStatusHandler(), + indexer.NewNilIndexer(), } srStartRound.Job = srStartRound.doStartRoundJob srStartRound.Check = srStartRound.doStartRoundConsensusCheck @@ -58,7 +57,6 @@ func NewSubroundStartRound( func checkNewSubroundStartRoundParams( baseSubround *spos.Subround, - broadcastUnnotarisedBlocks func(), ) error { if baseSubround == nil { return spos.ErrNilSubround @@ -66,9 +64,6 @@ func checkNewSubroundStartRoundParams( if baseSubround.ConsensusState == nil { return spos.ErrNilConsensusState } - if broadcastUnnotarisedBlocks == nil { - return spos.ErrNilBroadcastUnnotarisedBlocks - } err := spos.ValidateConsensusCore(baseSubround.ConsensusCoreHandler) @@ -85,6 +80,11 @@ func (sr *SubroundStartRound) SetAppStatusHandler(ash core.AppStatusHandler) err return nil } +// SetIndexer method set indexer +func (sr *SubroundStartRound) SetIndexer(indexer indexer.Indexer) { + sr.indexer = indexer +} + // doStartRoundJob method does the job of the subround StartRound func (sr *SubroundStartRound) doStartRoundJob() bool { sr.ResetConsensusState() @@ -147,6 +147,8 @@ func (sr *SubroundStartRound) initCurrentRound() bool { pubKeys := sr.ConsensusGroup() + sr.indexRoundIfNeeded(pubKeys) + selfIndex, err := sr.SelfConsensusGroupIndex() if err != nil { log.Info(fmt.Sprintf("%scanceled round %d in subround %s, not in the consensus group\n", @@ -185,16 +187,32 @@ func (sr *SubroundStartRound) initCurrentRound() bool { sr.SetStatus(sr.Current(), spos.SsFinished) - if leader == sr.SelfPubKey() { - //TODO: Should be analyzed if call of sr.broadcastUnnotarisedBlocks() is still necessary - } - // execute stored messages which were received in this new round but before this initialisation go sr.executeStoredMessages() return true } +func (sr *SubroundStartRound) indexRoundIfNeeded(pubKeys []string) { + if sr.indexer == nil || sr.indexer.IsNilIndexer() { + return + } + + shardId := sr.ShardCoordinator().SelfId() + signersIndexes := sr.NodesCoordinator().GetValidatorsIndexes(pubKeys) + round := sr.Rounder().Index() + + roundInfo := indexer.RoundInfo{ + Index: uint64(round), + SignersIndexes: signersIndexes, + BlockWasProposed: false, + ShardId: shardId, + Timestamp: time.Duration(sr.RoundTimeStamp.Unix()), + } + + go sr.indexer.SaveRoundInfo(roundInfo) +} + func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error { currentHeader := sr.Blockchain().GetCurrentBlockHeader() if currentHeader == nil { @@ -204,11 +222,20 @@ func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error } } - randomSource := fmt.Sprintf("%d-%s", roundIndex, core.ToB64(currentHeader.GetRandSeed())) + randomSeed := currentHeader.GetRandSeed() - log.Info(fmt.Sprintf("random source used to determine the next consensus group is: %s\n", randomSource)) + log.Info(fmt.Sprintf("random source used to determine the next consensus group is: %s\n", + core.ToB64(randomSeed)), + ) + + shardId := sr.ShardCoordinator().SelfId() - nextConsensusGroup, err := sr.GetNextConsensusGroup(randomSource, sr.ValidatorGroupSelector()) + nextConsensusGroup, _, err := sr.GetNextConsensusGroup( + randomSeed, + uint64(sr.RoundIndex), + shardId, + sr.NodesCoordinator(), + ) if err != nil { return err } @@ -224,5 +251,7 @@ func (sr *SubroundStartRound) generateNextConsensusGroup(roundIndex int64) error sr.SetConsensusGroup(nextConsensusGroup) + sr.BlockProcessor().SetConsensusData(randomSeed, uint64(sr.RoundIndex), currentHeader.GetEpoch(), shardId) + return 
nil } diff --git a/consensus/spos/commonSubround/subroundStartRound_test.go b/consensus/spos/commonSubround/subroundStartRound_test.go index 22b8163861d..911ae2cc9fb 100644 --- a/consensus/spos/commonSubround/subroundStartRound_test.go +++ b/consensus/spos/commonSubround/subroundStartRound_test.go @@ -5,10 +5,10 @@ import ( "testing" "time" - "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/consensus/mock" "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/consensus/spos/commonSubround" + "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) @@ -19,14 +19,17 @@ func defaultSubroundStartRoundFromSubround(sr *spos.Subround) (*commonSubround.S processingThresholdPercent, getSubroundName, executeStoredMessages, - broadcastUnnotarisedBlocks, ) return startRound, err } -func defaultSubround(consensusState *spos.ConsensusState, ch chan bool, container spos.ConsensusCoreHandler) (*spos.Subround, - error) { +func defaultSubround( + consensusState *spos.ConsensusState, + ch chan bool, + container spos.ConsensusCoreHandler, +) (*spos.Subround, error) { + return spos.NewSubround( -1, int(SrStartRound), @@ -51,7 +54,6 @@ func initSubroundStartRoundWithContainer(container spos.ConsensusCoreHandler) *c processingThresholdPercent, getSubroundName, executeStoredMessages, - broadcastUnnotarisedBlocks, ) return srStartRound @@ -71,7 +73,6 @@ func TestSubroundStartRound_NewSubroundStartRoundNilSubroundShouldFail(t *testin processingThresholdPercent, getSubroundName, executeStoredMessages, - broadcastUnnotarisedBlocks, ) assert.Nil(t, srStartRound) @@ -126,28 +127,6 @@ func TestSubroundStartRound_NewSubroundStartRoundNilConsensusStateShouldFail(t * assert.Equal(t, spos.ErrNilConsensusState, err) } -func TestSubroundStartRound_NewSubroundStartRoundNilBroadcastUnnotarisedBlocksFunctionShouldFail(t *testing.T) { - t.Parallel() - - container := mock.InitConsensusCore() - consensusState := initConsensusState() - ch := make(chan bool, 1) - - sr, _ := defaultSubround(consensusState, ch, container) - - srStartRound, err := commonSubround.NewSubroundStartRound( - sr, - extend, - processingThresholdPercent, - getSubroundName, - executeStoredMessages, - nil, - ) - - assert.Nil(t, srStartRound) - assert.Equal(t, spos.ErrNilBroadcastUnnotarisedBlocks, err) -} - func TestSubroundStartRound_NewSubroundStartRoundNilMultiSignerShouldFail(t *testing.T) { t.Parallel() @@ -319,9 +298,9 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenShouldSyncRetur func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGenerateNextConsensusGroupErr(t *testing.T) { t.Parallel() - validatorGroupSelector := &mock.ValidatorGroupSelectorMock{} + validatorGroupSelector := &mock.NodesCoordinatorMock{} err := errors.New("error") - validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte) ([]consensus.Validator, error) { + validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte, round uint64, shardId uint32) ([]sharding.Validator, error) { return nil, err } container := mock.InitConsensusCore() @@ -336,9 +315,13 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGenerateNextCon func TestSubroundStartRound_InitCurrentRoundShouldReturnFalseWhenGetLeaderErr(t *testing.T) { t.Parallel() - validatorGroupSelector := &mock.ValidatorGroupSelectorMock{} - validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte) ([]consensus.Validator, error) { - return 
make([]consensus.Validator, 0), nil + validatorGroupSelector := &mock.NodesCoordinatorMock{} + validatorGroupSelector.ComputeValidatorsGroupCalled = func( + bytes []byte, + round uint64, + shardId uint32, + ) ([]sharding.Validator, error) { + return make([]sharding.Validator, 0), nil } container := mock.InitConsensusCore() @@ -423,10 +406,14 @@ func TestSubroundStartRound_InitCurrentRoundShouldReturnTrue(t *testing.T) { func TestSubroundStartRound_GenerateNextConsensusGroupShouldReturnErr(t *testing.T) { t.Parallel() - validatorGroupSelector := &mock.ValidatorGroupSelectorMock{} + validatorGroupSelector := &mock.NodesCoordinatorMock{} err := errors.New("error") - validatorGroupSelector.ComputeValidatorsGroupCalled = func(bytes []byte) ([]consensus.Validator, error) { + validatorGroupSelector.ComputeValidatorsGroupCalled = func( + bytes []byte, + round uint64, + shardId uint32, + ) ([]sharding.Validator, error) { return nil, err } container := mock.InitConsensusCore() diff --git a/consensus/spos/consensusCore.go b/consensus/spos/consensusCore.go index 35f9989cf97..42d6bc39d0b 100644 --- a/consensus/spos/consensusCore.go +++ b/consensus/spos/consensusCore.go @@ -14,28 +14,26 @@ import ( // ConsensusCore implements ConsensusCoreHandler and provides access to common functionalities // for the rest of the consensus structures type ConsensusCore struct { - blockChain data.ChainHandler - blockProcessor process.BlockProcessor - blocksTracker process.BlocksTracker - bootstrapper process.Bootstrapper - broadcastMessenger consensus.BroadcastMessenger - chronologyHandler consensus.ChronologyHandler - hasher hashing.Hasher - marshalizer marshal.Marshalizer - blsPrivateKey crypto.PrivateKey - blsSingleSigner crypto.SingleSigner - multiSigner crypto.MultiSigner - rounder consensus.Rounder - shardCoordinator sharding.Coordinator - syncTimer ntp.SyncTimer - validatorGroupSelector consensus.ValidatorGroupSelector + blockChain data.ChainHandler + blockProcessor process.BlockProcessor + bootstrapper process.Bootstrapper + broadcastMessenger consensus.BroadcastMessenger + chronologyHandler consensus.ChronologyHandler + hasher hashing.Hasher + marshalizer marshal.Marshalizer + blsPrivateKey crypto.PrivateKey + blsSingleSigner crypto.SingleSigner + multiSigner crypto.MultiSigner + rounder consensus.Rounder + shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator + syncTimer ntp.SyncTimer } // NewConsensusCore creates a new ConsensusCore instance func NewConsensusCore( blockChain data.ChainHandler, blockProcessor process.BlockProcessor, - blocksTracker process.BlocksTracker, bootstrapper process.Bootstrapper, broadcastMessenger consensus.BroadcastMessenger, chronologyHandler consensus.ChronologyHandler, @@ -46,13 +44,13 @@ func NewConsensusCore( multiSigner crypto.MultiSigner, rounder consensus.Rounder, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, syncTimer ntp.SyncTimer, - validatorGroupSelector consensus.ValidatorGroupSelector) (*ConsensusCore, error) { +) (*ConsensusCore, error) { consensusCore := &ConsensusCore{ blockChain, blockProcessor, - blocksTracker, bootstrapper, broadcastMessenger, chronologyHandler, @@ -63,8 +61,8 @@ func NewConsensusCore( multiSigner, rounder, shardCoordinator, + nodesCoordinator, syncTimer, - validatorGroupSelector, } err := ValidateConsensusCore(consensusCore) @@ -85,11 +83,6 @@ func (cc *ConsensusCore) BlockProcessor() process.BlockProcessor { return cc.blockProcessor } -// BlocksTracker gets the BlocksTracker 
stored in the ConsensusCore -func (cc *ConsensusCore) BlocksTracker() process.BlocksTracker { - return cc.blocksTracker -} - // BootStrapper gets the Bootstrapper stored in the ConsensusCore func (cc *ConsensusCore) BootStrapper() process.Bootstrapper { return cc.bootstrapper @@ -135,9 +128,9 @@ func (cc *ConsensusCore) SyncTimer() ntp.SyncTimer { return cc.syncTimer } -// ValidatorGroupSelector gets the ValidatorGroupSelector stored in the ConsensusCore -func (cc *ConsensusCore) ValidatorGroupSelector() consensus.ValidatorGroupSelector { - return cc.validatorGroupSelector +// NodesCoordinator gets the NodesCoordinator stored in the ConsensusCore +func (cc *ConsensusCore) NodesCoordinator() sharding.NodesCoordinator { + return cc.nodesCoordinator } // RandomnessPrivateKey returns the BLS private key stored in the ConsensusStore diff --git a/consensus/spos/consensusCoreValidator.go b/consensus/spos/consensusCoreValidator.go index cda289a634f..b5a1026e6f6 100644 --- a/consensus/spos/consensusCoreValidator.go +++ b/consensus/spos/consensusCoreValidator.go @@ -11,9 +11,6 @@ func ValidateConsensusCore(container ConsensusCoreHandler) error { if container.BlockProcessor() == nil || container.BlockProcessor().IsInterfaceNil() { return ErrNilBlockProcessor } - if container.BlocksTracker() == nil || container.BlocksTracker().IsInterfaceNil() { - return ErrNilBlocksTracker - } if container.BootStrapper() == nil || container.BootStrapper().IsInterfaceNil() { return ErrNilBootstrapper } @@ -41,7 +38,7 @@ func ValidateConsensusCore(container ConsensusCoreHandler) error { if container.SyncTimer() == nil || container.SyncTimer().IsInterfaceNil() { return ErrNilSyncTimer } - if container.ValidatorGroupSelector() == nil || container.ValidatorGroupSelector().IsInterfaceNil() { + if container.NodesCoordinator() == nil || container.NodesCoordinator().IsInterfaceNil() { return ErrNilValidatorGroupSelector } if container.RandomnessPrivateKey() == nil || container.RandomnessPrivateKey().IsInterfaceNil() { diff --git a/consensus/spos/consensusCoreValidator_test.go b/consensus/spos/consensusCoreValidator_test.go index b09cf6f0fdc..74c22310434 100644 --- a/consensus/spos/consensusCoreValidator_test.go +++ b/consensus/spos/consensusCoreValidator_test.go @@ -10,7 +10,6 @@ import ( func initConsensusDataContainer() *ConsensusCore { blockChain := &mock.BlockChainMock{} blockProcessorMock := mock.InitBlockProcessorMock() - blocksTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} chronologyHandlerMock := mock.InitChronologyHandlerMock() @@ -22,24 +21,23 @@ func initConsensusDataContainer() *ConsensusCore { rounderMock := &mock.RounderMock{} shardCoordinatorMock := mock.ShardCoordinatorMock{} syncTimerMock := &mock.SyncTimerMock{} - validatorGroupSelector := &mock.ValidatorGroupSelectorMock{} + validatorGroupSelector := &mock.NodesCoordinatorMock{} return &ConsensusCore{ - blockChain: blockChain, - blockProcessor: blockProcessorMock, - blocksTracker: blocksTrackerMock, - bootstrapper: bootstrapperMock, - broadcastMessenger: broadcastMessengerMock, - chronologyHandler: chronologyHandlerMock, - hasher: hasherMock, - marshalizer: marshalizerMock, - blsPrivateKey: blsPrivateKeyMock, - blsSingleSigner: blsSingleSignerMock, - multiSigner: multiSignerMock, - rounder: rounderMock, - shardCoordinator: shardCoordinatorMock, - syncTimer: syncTimerMock, - validatorGroupSelector: validatorGroupSelector, + blockChain: blockChain, + 
blockProcessor: blockProcessorMock, + bootstrapper: bootstrapperMock, + broadcastMessenger: broadcastMessengerMock, + chronologyHandler: chronologyHandlerMock, + hasher: hasherMock, + marshalizer: marshalizerMock, + blsPrivateKey: blsPrivateKeyMock, + blsSingleSigner: blsSingleSignerMock, + multiSigner: multiSignerMock, + rounder: rounderMock, + shardCoordinator: shardCoordinatorMock, + syncTimer: syncTimerMock, + nodesCoordinator: validatorGroupSelector, } } @@ -157,7 +155,7 @@ func TestConsensusContainerValidator_ValidateNilValidatorGroupSelectorShouldFail t.Parallel() container := initConsensusDataContainer() - container.validatorGroupSelector = nil + container.nodesCoordinator = nil err := ValidateConsensusCore(container) diff --git a/consensus/spos/consensusCore_test.go b/consensus/spos/consensusCore_test.go index 21df3eaa9ff..ff62458bd89 100644 --- a/consensus/spos/consensusCore_test.go +++ b/consensus/spos/consensusCore_test.go @@ -16,7 +16,6 @@ func TestConsensusCore_WithNilBlockchainShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( nil, consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -27,8 +26,9 @@ func TestConsensusCore_WithNilBlockchainShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBlockChain, err) @@ -42,7 +42,6 @@ func TestConsensusCore_WithNilBlockProcessorShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), nil, - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -53,39 +52,14 @@ func TestConsensusCore_WithNilBlockProcessorShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBlockProcessor, err) } -func TestConsensusCore_WithNilBlocksTrackerShouldFail(t *testing.T) { - t.Parallel() - - consensusCoreMock := mock.InitConsensusCore() - - consensusCore, err := spos.NewConsensusCore( - consensusCoreMock.Blockchain(), - consensusCoreMock.BlockProcessor(), - nil, - consensusCoreMock.BootStrapper(), - consensusCoreMock.BroadcastMessenger(), - consensusCoreMock.Chronology(), - consensusCoreMock.Hasher(), - consensusCoreMock.Marshalizer(), - consensusCoreMock.RandomnessPrivateKey(), - consensusCoreMock.RandomnessSingleSigner(), - consensusCoreMock.MultiSigner(), - consensusCoreMock.Rounder(), - consensusCoreMock.ShardCoordinator(), - consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) - - assert.Nil(t, consensusCore) - assert.Equal(t, spos.ErrNilBlocksTracker, err) -} - func TestConsensusCore_WithNilBootstrapperShouldFail(t *testing.T) { t.Parallel() @@ -94,7 +68,6 @@ func TestConsensusCore_WithNilBootstrapperShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), nil, consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ 
-105,8 +78,9 @@ func TestConsensusCore_WithNilBootstrapperShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBootstrapper, err) @@ -120,7 +94,6 @@ func TestConsensusCore_WithNilBroadcastMessengerShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), nil, consensusCoreMock.Chronology(), @@ -131,8 +104,9 @@ func TestConsensusCore_WithNilBroadcastMessengerShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBroadcastMessenger, err) @@ -146,7 +120,6 @@ func TestConsensusCore_WithNilChronologyShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), nil, @@ -157,8 +130,9 @@ func TestConsensusCore_WithNilChronologyShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilChronologyHandler, err) @@ -172,7 +146,6 @@ func TestConsensusCore_WithNilHasherShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -183,8 +156,9 @@ func TestConsensusCore_WithNilHasherShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilHasher, err) @@ -198,7 +172,6 @@ func TestConsensusCore_WithNilMarshalizerShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -209,8 +182,9 @@ func TestConsensusCore_WithNilMarshalizerShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilMarshalizer, err) @@ -224,7 +198,6 @@ func TestConsensusCore_WithNilBlsPrivateKeyShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), 
consensusCoreMock.Chronology(), @@ -235,8 +208,9 @@ func TestConsensusCore_WithNilBlsPrivateKeyShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBlsPrivateKey, err) @@ -250,7 +224,6 @@ func TestConsensusCore_WithNilBlsSingleSignerShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -261,8 +234,9 @@ func TestConsensusCore_WithNilBlsSingleSignerShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilBlsSingleSigner, err) @@ -276,7 +250,6 @@ func TestConsensusCore_WithNilMultiSignerShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -287,8 +260,9 @@ func TestConsensusCore_WithNilMultiSignerShouldFail(t *testing.T) { nil, consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilMultiSigner, err) @@ -302,7 +276,6 @@ func TestConsensusCore_WithNilRounderShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -313,8 +286,9 @@ func TestConsensusCore_WithNilRounderShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), nil, consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilRounder, err) @@ -328,7 +302,6 @@ func TestConsensusCore_WithNilShardCoordinatorShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -339,14 +312,15 @@ func TestConsensusCore_WithNilShardCoordinatorShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), nil, + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.Nil(t, consensusCore) assert.Equal(t, spos.ErrNilShardCoordinator, err) } -func TestConsensusCore_WithNilSyncTimerShouldFail(t *testing.T) { +func TestConsensusCore_WithNilValidatorGroupSelectorShouldFail(t *testing.T) { t.Parallel() consensusCoreMock := mock.InitConsensusCore() @@ -354,7 +328,6 @@ func TestConsensusCore_WithNilSyncTimerShouldFail(t *testing.T) { 
consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -366,13 +339,14 @@ func TestConsensusCore_WithNilSyncTimerShouldFail(t *testing.T) { consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), nil, - consensusCoreMock.ValidatorGroupSelector()) + consensusCoreMock.SyncTimer(), + ) assert.Nil(t, consensusCore) - assert.Equal(t, spos.ErrNilSyncTimer, err) + assert.Equal(t, spos.ErrNilValidatorGroupSelector, err) } -func TestConsensusCore_WithNilValidatorGroupSelectorShouldFail(t *testing.T) { +func TestConsensusCore_WithNilSyncTimerShouldFail(t *testing.T) { t.Parallel() consensusCoreMock := mock.InitConsensusCore() @@ -380,7 +354,6 @@ func TestConsensusCore_WithNilValidatorGroupSelectorShouldFail(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -391,11 +364,12 @@ func TestConsensusCore_WithNilValidatorGroupSelectorShouldFail(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), - consensusCoreMock.SyncTimer(), - nil) + consensusCoreMock.NodesCoordinator(), + nil, + ) assert.Nil(t, consensusCore) - assert.Equal(t, spos.ErrNilValidatorGroupSelector, err) + assert.Equal(t, spos.ErrNilSyncTimer, err) } func TestConsensusCore_CreateConsensusCoreShouldWork(t *testing.T) { @@ -406,7 +380,6 @@ func TestConsensusCore_CreateConsensusCoreShouldWork(t *testing.T) { consensusCore, err := spos.NewConsensusCore( consensusCoreMock.Blockchain(), consensusCoreMock.BlockProcessor(), - consensusCoreMock.BlocksTracker(), consensusCoreMock.BootStrapper(), consensusCoreMock.BroadcastMessenger(), consensusCoreMock.Chronology(), @@ -417,8 +390,9 @@ func TestConsensusCore_CreateConsensusCoreShouldWork(t *testing.T) { consensusCoreMock.MultiSigner(), consensusCoreMock.Rounder(), consensusCoreMock.ShardCoordinator(), + consensusCoreMock.NodesCoordinator(), consensusCoreMock.SyncTimer(), - consensusCoreMock.ValidatorGroupSelector()) + ) assert.NotNil(t, consensusCore) assert.Nil(t, err) diff --git a/consensus/spos/consensusState.go b/consensus/spos/consensusState.go index 25607578f8c..cb34e1b472a 100644 --- a/consensus/spos/consensusState.go +++ b/consensus/spos/consensusState.go @@ -8,6 +8,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/core/logger" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/sharding" ) var log = logger.DefaultLogger() @@ -94,21 +95,28 @@ func (cns *ConsensusState) GetLeader() (string, error) { // GetNextConsensusGroup gets the new consensus group for the current round based on current eligible list and a random // source for the new selection -func (cns *ConsensusState) GetNextConsensusGroup(randomSource string, vgs consensus.ValidatorGroupSelector) ([]string, - error) { - validatorsGroup, err := vgs.ComputeValidatorsGroup([]byte(randomSource)) - +func (cns *ConsensusState) GetNextConsensusGroup( + randomSource []byte, + round uint64, + shardId uint32, + nodesCoordinator sharding.NodesCoordinator, +) ([]string, []string, error) { + + validatorsGroup, err := nodesCoordinator.ComputeValidatorsGroup(randomSource, round, 
shardId) if err != nil { - return nil, err + return nil, nil, err } - newConsensusGroup := make([]string, 0) + consensusSize := len(validatorsGroup) + newConsensusGroup := make([]string, consensusSize) + consensusRewardAddresses := make([]string, consensusSize) - for i := 0; i < len(validatorsGroup); i++ { - newConsensusGroup = append(newConsensusGroup, string(validatorsGroup[i].PubKey())) + for i := 0; i < consensusSize; i++ { + newConsensusGroup[i] = string(validatorsGroup[i].PubKey()) + consensusRewardAddresses[i] = string(validatorsGroup[i].Address()) } - return newConsensusGroup, nil + return newConsensusGroup, consensusRewardAddresses, nil } // IsConsensusDataSet method returns true if the consensus data for the current round is set and false otherwise diff --git a/consensus/spos/consensusState_test.go b/consensus/spos/consensusState_test.go index 97d186f73d8..eb67fc567a2 100644 --- a/consensus/spos/consensusState_test.go +++ b/consensus/spos/consensusState_test.go @@ -5,11 +5,11 @@ import ( "testing" "github.com/ElrondNetwork/elrond-go/consensus" - "github.com/ElrondNetwork/elrond-go/consensus/spos" - "github.com/ElrondNetwork/elrond-go/consensus/mock" + "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/consensus/spos/bn" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) @@ -138,13 +138,17 @@ func TestConsensusState_GetNextConsensusGroupShouldFailWhenComputeValidatorsGrou cns := internalInitConsensusState() - vgs := &mock.ValidatorGroupSelectorMock{} + nodesCoordinator := &mock.NodesCoordinatorMock{} err := errors.New("error") - vgs.ComputeValidatorsGroupCalled = func(randomness []byte) ([]consensus.Validator, error) { + nodesCoordinator.ComputeValidatorsGroupCalled = func( + randomness []byte, + round uint64, + shardId uint32, + ) ([]sharding.Validator, error) { return nil, err } - _, err2 := cns.GetNextConsensusGroup("", vgs) + _, _, err2 := cns.GetNextConsensusGroup([]byte(""), 0, 0, nodesCoordinator) assert.Equal(t, err, err2) } @@ -153,11 +157,12 @@ func TestConsensusState_GetNextConsensusGroupShouldWork(t *testing.T) { cns := internalInitConsensusState() - vgs := &mock.ValidatorGroupSelectorMock{} + nodesCoordinator := &mock.NodesCoordinatorMock{} - nextConsensusGroup, err := cns.GetNextConsensusGroup("", vgs) + nextConsensusGroup, rewardAddresses, err := cns.GetNextConsensusGroup(nil, 0, 0, nodesCoordinator) assert.Nil(t, err) assert.NotNil(t, nextConsensusGroup) + assert.NotNil(t, rewardAddresses) } func TestConsensusState_IsConsensusDataSetShouldReturnTrue(t *testing.T) { @@ -209,13 +214,13 @@ func TestConsensusState_IsJobDoneShouldReturnFalse(t *testing.T) { cns := internalInitConsensusState() - cns.SetJobDone("1", bn.SrBlock, false) + _ = cns.SetJobDone("1", bn.SrBlock, false) assert.False(t, cns.IsJobDone("1", bn.SrBlock)) - cns.SetJobDone("1", bn.SrCommitment, true) + _ = cns.SetJobDone("1", bn.SrCommitment, true) assert.False(t, cns.IsJobDone("1", bn.SrBlock)) - cns.SetJobDone("2", bn.SrBlock, true) + _ = cns.SetJobDone("2", bn.SrBlock, true) assert.False(t, cns.IsJobDone("1", bn.SrBlock)) } @@ -224,7 +229,7 @@ func TestConsensusState_IsJobDoneShouldReturnTrue(t *testing.T) { cns := internalInitConsensusState() - cns.SetJobDone("1", bn.SrBlock, true) + _ = cns.SetJobDone("1", bn.SrBlock, true) assert.True(t, cns.IsJobDone("1", bn.SrBlock)) } @@ -234,13 +239,13 @@ func TestConsensusState_IsSelfJobDoneShouldReturnFalse(t *testing.T) { cns := 
internalInitConsensusState() - cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, false) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, false) assert.False(t, cns.IsSelfJobDone(bn.SrBlock)) - cns.SetJobDone(cns.SelfPubKey(), bn.SrCommitment, true) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrCommitment, true) assert.False(t, cns.IsSelfJobDone(bn.SrBlock)) - cns.SetJobDone(cns.SelfPubKey()+"X", bn.SrBlock, true) + _ = cns.SetJobDone(cns.SelfPubKey()+"X", bn.SrBlock, true) assert.False(t, cns.IsSelfJobDone(bn.SrBlock)) } @@ -249,7 +254,7 @@ func TestConsensusState_IsSelfJobDoneShouldReturnTrue(t *testing.T) { cns := internalInitConsensusState() - cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, true) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, true) assert.True(t, cns.IsSelfJobDone(bn.SrBlock)) } @@ -348,7 +353,7 @@ func TestConsensusState_CanDoSubroundJobShouldReturnFalseWhenSelfJobIsDone(t *te cns := internalInitConsensusState() cns.Data = make([]byte, 0) - cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, true) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, true) assert.False(t, cns.CanDoSubroundJob(bn.SrBlock)) } @@ -359,7 +364,7 @@ func TestConsensusState_CanDoSubroundJobShouldReturnFalseWhenCurrentRoundIsFinis cns := internalInitConsensusState() cns.Data = make([]byte, 0) - cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, false) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, false) cns.SetStatus(bn.SrBlock, spos.SsFinished) assert.False(t, cns.CanDoSubroundJob(bn.SrBlock)) @@ -371,7 +376,7 @@ func TestConsensusState_CanDoSubroundJobShouldReturnTrue(t *testing.T) { cns := internalInitConsensusState() cns.Data = make([]byte, 0) - cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, false) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, false) cns.SetStatus(bn.SrBlock, spos.SsNotFinished) assert.True(t, cns.CanDoSubroundJob(bn.SrBlock)) @@ -413,7 +418,7 @@ func TestConsensusState_CanProcessReceivedMessageShouldReturnFalseWhenJobIsDone( PubKey: []byte("1"), } - cns.SetJobDone("1", bn.SrBlock, true) + _ = cns.SetJobDone("1", bn.SrBlock, true) assert.False(t, cns.CanProcessReceivedMessage(cnsDta, 0, bn.SrBlock)) } @@ -455,7 +460,7 @@ func TestConsensusState_GenerateBitmapShouldWork(t *testing.T) { selfIndexInConsensusGroup, _ := cns.SelfConsensusGroupIndex() bitmapExpected[selfIndexInConsensusGroup/8] |= 1 << (uint16(selfIndexInConsensusGroup) % 8) - cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, true) + _ = cns.SetJobDone(cns.SelfPubKey(), bn.SrBlock, true) bitmap := cns.GenerateBitmap(bn.SrBlock) assert.Equal(t, bitmapExpected, bitmap) diff --git a/consensus/spos/constants.go b/consensus/spos/constants.go index f7e15fe1c52..aeb8bf850e1 100644 --- a/consensus/spos/constants.go +++ b/consensus/spos/constants.go @@ -2,7 +2,3 @@ package spos // maxThresholdPercent specifies the max allocated time percent for doing Job as a percentage of the total time of one round const maxThresholdPercent = 75 - -// MaxRoundsGap defines the maximum expected gap in terms of rounds, between metachain and shardchain, after which -// a block committed and broadcast from shardchain would be visible as notarized in metachain -const MaxRoundsGap = 3 diff --git a/consensus/spos/errors.go b/consensus/spos/errors.go index b03c091bbe6..566c09fb385 100644 --- a/consensus/spos/errors.go +++ b/consensus/spos/errors.go @@ -64,9 +64,6 @@ var ErrNilMessenger = errors.New("messenger is nil") // ErrNilBlockProcessor is raised when a valid block processor is expected but nil used var ErrNilBlockProcessor = errors.New("block processor is 
nil") -// ErrNilBlocksTracker is raised when a valid block tracker is expected but nil used -var ErrNilBlocksTracker = errors.New("blocks tracker is nil") - // ErrNilBootstrapper is raised when a valid block processor is expected but nil used var ErrNilBootstrapper = errors.New("bootstrapper is nil") @@ -142,9 +139,6 @@ var ErrNilBody = errors.New("body is nil") // ErrNilMetaHeader is raised when an expected meta header is nil var ErrNilMetaHeader = errors.New("meta header is nil") -// ErrNilBroadcastUnnotarisedBlocks is raised when a valid broadcastUnnotarisedBlocks function is expected but nil used -var ErrNilBroadcastUnnotarisedBlocks = errors.New("broadcastUnnotarisedBlocks is nil") - // ErrNilForkDetector is raised when a valid fork detector is expected but nil used var ErrNilForkDetector = errors.New("fork detector is nil") diff --git a/consensus/spos/export_test.go b/consensus/spos/export_test.go index 497c48516ed..343167fee59 100644 --- a/consensus/spos/export_test.go +++ b/consensus/spos/export_test.go @@ -19,14 +19,6 @@ func (wrk *Worker) SetBlockProcessor(blockProcessor process.BlockProcessor) { wrk.blockProcessor = blockProcessor } -func (wrk *Worker) BlockTracker() process.BlocksTracker { - return wrk.blockTracker -} - -func (wrk *Worker) SetBlockTracker(blockTracker process.BlocksTracker) { - wrk.blockTracker = blockTracker -} - func (wrk *Worker) Bootstrapper() process.Bootstrapper { return wrk.bootstrapper } diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index 3443c22a40d..a48dbd2bdc5 100644 --- a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -18,8 +18,6 @@ type ConsensusCoreHandler interface { Blockchain() data.ChainHandler // BlockProcessor gets the BlockProcessor stored in the ConsensusCore BlockProcessor() process.BlockProcessor - // BlocksTracker gets the BlockTracker stored in the ConsensusCore - BlocksTracker() process.BlocksTracker // BootStrapper gets the Bootstrapper stored in the ConsensusCore BootStrapper() process.Bootstrapper // BroadcastMessenger gets the BroadcastMessenger stored in ConsensusCore @@ -38,8 +36,8 @@ type ConsensusCoreHandler interface { ShardCoordinator() sharding.Coordinator // SyncTimer gets the SyncTimer stored in the ConsensusCore SyncTimer() ntp.SyncTimer - // ValidatorGroupSelector gets the ValidatorGroupSelector stored in the ConsensusCore - ValidatorGroupSelector() consensus.ValidatorGroupSelector + // NodesCoordinator gets the NodesCoordinator stored in the ConsensusCore + NodesCoordinator() sharding.NodesCoordinator // RandomnessPrivateKey returns the private key stored in the ConsensusStore used for randomness generation RandomnessPrivateKey() crypto.PrivateKey // RandomnessSingleSigner returns the single signer stored in the ConsensusStore used for randomness generation @@ -88,8 +86,6 @@ type WorkerHandler interface { GetConsensusStateChangedChannel() chan bool //ExecuteStoredMessages tries to execute all the messages received which are valid for execution ExecuteStoredMessages() - //BroadcastUnnotarisedBlocks broadcasts all blocks which are not notarised yet - BroadcastUnnotarisedBlocks() // IsInterfaceNil returns true if there is no value under the interface IsInterfaceNil() bool } diff --git a/consensus/spos/sposFactory/sposFactory.go b/consensus/spos/sposFactory/sposFactory.go index ce192c09899..394950911e9 100644 --- a/consensus/spos/sposFactory/sposFactory.go +++ b/consensus/spos/sposFactory/sposFactory.go @@ -7,6 +7,7 @@ import ( 
"github.com/ElrondNetwork/elrond-go/consensus/spos/bls" "github.com/ElrondNetwork/elrond-go/consensus/spos/bn" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/sharding" @@ -19,6 +20,7 @@ func GetSubroundsFactory( worker spos.WorkerHandler, consensusType string, appStatusHandler core.AppStatusHandler, + indexer indexer.Indexer, ) (spos.SubroundsFactory, error) { switch consensusType { @@ -33,6 +35,8 @@ func GetSubroundsFactory( return nil, err } + subRoundFactoryBls.SetIndexer(indexer) + return subRoundFactoryBls, nil case bnConsensusType: subRoundFactoryBn, err := bn.NewSubroundsFactory(consensusDataContainer, consensusState, worker) @@ -45,6 +49,8 @@ func GetSubroundsFactory( return nil, err } + subRoundFactoryBn.SetIndexer(indexer) + return subRoundFactoryBn, nil } diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index 75bbbb5789d..ac1a04bb3d5 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -21,7 +21,6 @@ import ( type Worker struct { consensusService ConsensusService blockProcessor process.BlockProcessor - blockTracker process.BlocksTracker bootstrapper process.Bootstrapper broadcastMessenger consensus.BroadcastMessenger consensusState *ConsensusState @@ -47,7 +46,6 @@ type Worker struct { func NewWorker( consensusService ConsensusService, blockProcessor process.BlockProcessor, - blockTracker process.BlocksTracker, bootstrapper process.Bootstrapper, broadcastMessenger consensus.BroadcastMessenger, consensusState *ConsensusState, @@ -62,7 +60,6 @@ func NewWorker( err := checkNewWorkerParams( consensusService, blockProcessor, - blockTracker, bootstrapper, broadcastMessenger, consensusState, @@ -81,7 +78,6 @@ func NewWorker( wrk := Worker{ consensusService: consensusService, blockProcessor: blockProcessor, - blockTracker: blockTracker, bootstrapper: bootstrapper, broadcastMessenger: broadcastMessenger, consensusState: consensusState, @@ -108,7 +104,6 @@ func NewWorker( func checkNewWorkerParams( consensusService ConsensusService, blockProcessor process.BlockProcessor, - blockTracker process.BlocksTracker, bootstrapper process.Bootstrapper, broadcastMessenger consensus.BroadcastMessenger, consensusState *ConsensusState, @@ -126,9 +121,6 @@ func checkNewWorkerParams( if blockProcessor == nil || blockProcessor.IsInterfaceNil() { return ErrNilBlockProcessor } - if blockTracker == nil || blockTracker.IsInterfaceNil() { - return ErrNilBlocksTracker - } if bootstrapper == nil || bootstrapper.IsInterfaceNil() { return ErrNilBootstrapper } @@ -394,29 +386,6 @@ func (wrk *Worker) GetConsensusStateChangedChannel() chan bool { return wrk.consensusStateChangedChannel } -//BroadcastUnnotarisedBlocks broadcasts all blocks which are not notarised yet -func (wrk *Worker) BroadcastUnnotarisedBlocks() { - headers := wrk.blockTracker.UnnotarisedBlocks() - for _, header := range headers { - broadcastRound := wrk.blockTracker.BlockBroadcastRound(header.GetNonce()) - if broadcastRound >= wrk.consensusState.RoundIndex-MaxRoundsGap { - continue - } - - err := wrk.broadcastMessenger.BroadcastHeader(header) - if err != nil { - log.Info(err.Error()) - continue - } - - wrk.blockTracker.SetBlockBroadcastRound(header.GetNonce(), wrk.consensusState.RoundIndex) - - log.Info(fmt.Sprintf("%sStep 0: Unnotarised header with nonce %d has been broadcast to metachain\n", - wrk.syncTimer.FormattedCurrentTime(), - 
header.GetNonce())) - } -} - //ExecuteStoredMessages tries to execute all the messages received which are valid for execution func (wrk *Worker) ExecuteStoredMessages() { wrk.mutReceivedMessages.Lock() diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index 6acedc22cf2..84abc510394 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -27,7 +27,6 @@ func initWorker() *spos.Worker { RevertAccountStateCalled: func() { }, } - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -54,7 +53,6 @@ func initWorker() *spos.Worker { sposWorker, _ := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -85,7 +83,6 @@ func TestWorker_NewWorkerConsensusServiceNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -99,7 +96,6 @@ func TestWorker_NewWorkerConsensusServiceNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker(nil, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -118,7 +114,6 @@ func TestWorker_NewWorkerConsensusServiceNilShouldFail(t *testing.T) { func TestWorker_NewWorkerBlockProcessorNilShouldFail(t *testing.T) { t.Parallel() - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -133,7 +128,6 @@ func TestWorker_NewWorkerBlockProcessorNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker(bnService, nil, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -149,45 +143,10 @@ func TestWorker_NewWorkerBlockProcessorNilShouldFail(t *testing.T) { assert.Equal(t, spos.ErrNilBlockProcessor, err) } -func TestWorker_NewWorkerBlockTrackerNilShouldFail(t *testing.T) { - t.Parallel() - - blockProcessor := &mock.BlockProcessorMock{} - bootstrapperMock := &mock.BootstrapperMock{} - broadcastMessengerMock := &mock.BroadcastMessengerMock{} - consensusState := initConsensusState() - forkDetectorMock := &mock.ForkDetectorMock{} - keyGeneratorMock := &mock.KeyGenMock{} - marshalizerMock := mock.MarshalizerMock{} - rounderMock := initRounderMock() - shardCoordinatorMock := mock.ShardCoordinatorMock{} - singleSignerMock := &mock.SingleSignerMock{} - syncTimerMock := &mock.SyncTimerMock{} - bnService, _ := bn.NewConsensusService() - - wrk, err := spos.NewWorker(bnService, - blockProcessor, - nil, - bootstrapperMock, - broadcastMessengerMock, - consensusState, - forkDetectorMock, - keyGeneratorMock, - marshalizerMock, - rounderMock, - shardCoordinatorMock, - singleSignerMock, - syncTimerMock) - - assert.Nil(t, wrk) - assert.Equal(t, spos.ErrNilBlocksTracker, err) -} - func TestWorker_NewWorkerBootstrapperNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() forkDetectorMock := &mock.ForkDetectorMock{} @@ -202,7 +161,6 @@ func TestWorker_NewWorkerBootstrapperNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - 
blockTrackerMock, nil, broadcastMessengerMock, consensusState, @@ -222,7 +180,6 @@ func TestWorker_NewWorkerBroadcastMessengerNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} consensusState := initConsensusState() forkDetectorMock := &mock.ForkDetectorMock{} @@ -237,7 +194,6 @@ func TestWorker_NewWorkerBroadcastMessengerNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, nil, consensusState, @@ -256,7 +212,6 @@ func TestWorker_NewWorkerBroadcastMessengerNilShouldFail(t *testing.T) { func TestWorker_NewWorkerConsensusStateNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} forkDetectorMock := &mock.ForkDetectorMock{} @@ -271,7 +226,6 @@ func TestWorker_NewWorkerConsensusStateNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, nil, @@ -290,7 +244,6 @@ func TestWorker_NewWorkerConsensusStateNilShouldFail(t *testing.T) { func TestWorker_NewWorkerForkDetectorNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -305,7 +258,6 @@ func TestWorker_NewWorkerForkDetectorNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -324,7 +276,6 @@ func TestWorker_NewWorkerForkDetectorNilShouldFail(t *testing.T) { func TestWorker_NewWorkerKeyGeneratorNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -339,7 +290,6 @@ func TestWorker_NewWorkerKeyGeneratorNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -358,7 +308,6 @@ func TestWorker_NewWorkerKeyGeneratorNilShouldFail(t *testing.T) { func TestWorker_NewWorkerMarshalizerNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -373,7 +322,6 @@ func TestWorker_NewWorkerMarshalizerNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -392,7 +340,6 @@ func TestWorker_NewWorkerMarshalizerNilShouldFail(t *testing.T) { func TestWorker_NewWorkerRounderNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -407,7 +354,6 @@ func TestWorker_NewWorkerRounderNilShouldFail(t 
*testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -426,7 +372,6 @@ func TestWorker_NewWorkerRounderNilShouldFail(t *testing.T) { func TestWorker_NewWorkerShardCoordinatorNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -441,7 +386,6 @@ func TestWorker_NewWorkerShardCoordinatorNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -460,7 +404,6 @@ func TestWorker_NewWorkerShardCoordinatorNilShouldFail(t *testing.T) { func TestWorker_NewWorkerSingleSignerNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -475,7 +418,6 @@ func TestWorker_NewWorkerSingleSignerNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -494,7 +436,6 @@ func TestWorker_NewWorkerSingleSignerNilShouldFail(t *testing.T) { func TestWorker_NewWorkerSyncTimerNilShouldFail(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -509,7 +450,6 @@ func TestWorker_NewWorkerSyncTimerNilShouldFail(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -528,7 +468,6 @@ func TestWorker_NewWorkerSyncTimerNilShouldFail(t *testing.T) { func TestWorker_NewWorkerShouldWork(t *testing.T) { t.Parallel() blockProcessor := &mock.BlockProcessorMock{} - blockTrackerMock := &mock.BlocksTrackerMock{} bootstrapperMock := &mock.BootstrapperMock{} broadcastMessengerMock := &mock.BroadcastMessengerMock{} consensusState := initConsensusState() @@ -544,7 +483,6 @@ func TestWorker_NewWorkerShouldWork(t *testing.T) { wrk, err := spos.NewWorker( bnService, blockProcessor, - blockTrackerMock, bootstrapperMock, broadcastMessengerMock, consensusState, @@ -1381,141 +1319,3 @@ func TestWorker_ExecuteStoredMessagesShouldWork(t *testing.T) { rcvMsg = wrk.ReceivedMessages() assert.Equal(t, 0, len(rcvMsg[msgType])) } - -func TestWorker_BroadcastUnnotarisedBlocksShouldNotBroadcastWhenMaxRoundGapIsNotAchieved(t *testing.T) { - t.Parallel() - - headerHasBeenBroadcast := false - broadcastInRound := int64(0) - - wrk := *initWorker() - header := &block.Header{Nonce: 3} - roundIndex := int64(10) - blockTracker := &mock.BlocksTrackerMock{ - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - headers := make([]data.HeaderHandler, 0) - headers = append(headers, header) - return headers - }, - BlockBroadcastRoundCalled: func(nonce uint64) int64 { - return broadcastInRound - }, - SetBlockBroadcastRoundCalled: func(nonce uint64, round int64) { - broadcastInRound = round - }, - } - - forkDetector := &mock.ForkDetectorMock{ - GetHighestFinalBlockNonceCalled: func() uint64 { - return header.Nonce - }, - } 
- - wrk.ConsensusState().RoundIndex = int64(roundIndex) - wrk.SetBlockTracker(blockTracker) - wrk.SetForkDetector(forkDetector) - bmm := &mock.BroadcastMessengerMock{ - BroadcastHeaderCalled: func(handler data.HeaderHandler) error { - headerHasBeenBroadcast = true - return nil - }, - } - wrk.SetBroadcastMessenger(bmm) - wrk.BlockTracker().SetBlockBroadcastRound(header.Nonce, int64(roundIndex-spos.MaxRoundsGap)) - - wrk.BroadcastUnnotarisedBlocks() - assert.False(t, headerHasBeenBroadcast) - assert.Equal(t, int64(roundIndex-spos.MaxRoundsGap), wrk.BlockTracker().BlockBroadcastRound(header.Nonce)) -} - -func TestWorker_BroadcastUnnotarisedBlocksShouldErrWhenBroadcastHeaderFails(t *testing.T) { - t.Parallel() - - broadcastInRound := int64(0) - - var err error - wrk := *initWorker() - header := &block.Header{Nonce: 3} - roundIndex := int64(10) - blockTracker := &mock.BlocksTrackerMock{ - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - headers := make([]data.HeaderHandler, 0) - headers = append(headers, header) - return headers - }, - BlockBroadcastRoundCalled: func(nonce uint64) int64 { - return broadcastInRound - }, - SetBlockBroadcastRoundCalled: func(nonce uint64, round int64) { - broadcastInRound = round - }, - } - - forkDetector := &mock.ForkDetectorMock{ - GetHighestFinalBlockNonceCalled: func() uint64 { - return header.Nonce - }, - } - - wrk.ConsensusState().RoundIndex = int64(roundIndex) - wrk.SetBlockTracker(blockTracker) - wrk.SetForkDetector(forkDetector) - bmm := &mock.BroadcastMessengerMock{ - BroadcastHeaderCalled: func(handler data.HeaderHandler) error { - err = errors.New("broadcast header error") - return err - }, - } - wrk.SetBroadcastMessenger(bmm) - wrk.BlockTracker().SetBlockBroadcastRound(header.Nonce, int64(roundIndex-spos.MaxRoundsGap-1)) - - wrk.BroadcastUnnotarisedBlocks() - assert.NotNil(t, err) - assert.Equal(t, int64(roundIndex-spos.MaxRoundsGap-1), wrk.BlockTracker().BlockBroadcastRound(header.Nonce)) -} - -func TestWorker_BroadcastUnnotarisedBlocksShouldBroadcast(t *testing.T) { - t.Parallel() - - headerHasBeenBroadcast := false - broadcastInRound := int64(0) - - wrk := *initWorker() - header := &block.Header{Nonce: 3} - roundIndex := int64(10) - blockTracker := &mock.BlocksTrackerMock{ - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - headers := make([]data.HeaderHandler, 0) - headers = append(headers, header) - return headers - }, - BlockBroadcastRoundCalled: func(nonce uint64) int64 { - return broadcastInRound - }, - SetBlockBroadcastRoundCalled: func(nonce uint64, round int64) { - broadcastInRound = round - }, - } - - forkDetector := &mock.ForkDetectorMock{ - GetHighestFinalBlockNonceCalled: func() uint64 { - return header.Nonce - }, - } - - wrk.ConsensusState().RoundIndex = int64(roundIndex) - wrk.SetBlockTracker(blockTracker) - wrk.SetForkDetector(forkDetector) - bmm := &mock.BroadcastMessengerMock{ - BroadcastHeaderCalled: func(handler data.HeaderHandler) error { - headerHasBeenBroadcast = true - return nil - }, - } - wrk.SetBroadcastMessenger(bmm) - wrk.BlockTracker().SetBlockBroadcastRound(header.Nonce, int64(roundIndex-spos.MaxRoundsGap-1)) - - wrk.BroadcastUnnotarisedBlocks() - assert.True(t, headerHasBeenBroadcast) - assert.Equal(t, roundIndex, wrk.BlockTracker().BlockBroadcastRound(header.Nonce)) -} diff --git a/consensus/validators/errors.go b/consensus/validators/errors.go deleted file mode 100644 index 9276c1ca7f4..00000000000 --- a/consensus/validators/errors.go +++ /dev/null @@ -1,14 +0,0 @@ -package validators - -import ( - 
"errors" -) - -// ErrNilStake signals that a nil stake structure has been provided -var ErrNilStake = errors.New("nil stake") - -// ErrNegativeStake signals that the stake is negative -var ErrNegativeStake = errors.New("negative stake") - -// ErrNilPubKey signals that the public key is nil -var ErrNilPubKey = errors.New("nil public key") diff --git a/consensus/validators/groupSelectors/errors.go b/consensus/validators/groupSelectors/errors.go deleted file mode 100644 index 986f109ddea..00000000000 --- a/consensus/validators/groupSelectors/errors.go +++ /dev/null @@ -1,29 +0,0 @@ -package groupSelectors - -import ( - "errors" -) - -// ErrNilInputSlice signals that a nil slice has been provided -var ErrNilInputSlice = errors.New("nil input slice") - -// ErrSmallEligibleListSize signals that the eligible validators list's size is less than the consensus size -var ErrSmallEligibleListSize = errors.New("small eligible list size") - -// ErrInvalidConsensusGroupSize signals that the consensus size is invalid (e.g. value is negative) -var ErrInvalidConsensusGroupSize = errors.New("invalid consensus group size") - -// ErrEligibleSelectionMismatch signals a mismatch between the eligible list and the group selection bitmap -var ErrEligibleSelectionMismatch = errors.New("invalid eligible validator selection") - -// ErrEligibleTooManySelections signals an invalid selection for consensus group -var ErrEligibleTooManySelections = errors.New("too many selections for consensus group") - -// ErrEligibleTooFewSelections signals an invalid selection for consensus group -var ErrEligibleTooFewSelections = errors.New("too few selections for consensus group") - -// ErrNilRandomness signals that a nil randomness source has been provided -var ErrNilRandomness = errors.New("nil randomness source") - -// ErrNilHasher signals that a nil hasher has been provided -var ErrNilHasher = errors.New("nil hasher") diff --git a/consensus/validators/groupSelectors/export_test.go b/consensus/validators/groupSelectors/export_test.go deleted file mode 100644 index ba4f0f77611..00000000000 --- a/consensus/validators/groupSelectors/export_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package groupSelectors - -import ( - "github.com/ElrondNetwork/elrond-go/consensus" -) - -func (ihgs *indexHashedGroupSelector) EligibleList() []consensus.Validator { - return ihgs.eligibleList -} diff --git a/consensus/validators/groupSelectors/indexHashedGroup.go b/consensus/validators/groupSelectors/indexHashedGroup.go deleted file mode 100644 index b947a9ba7ca..00000000000 --- a/consensus/validators/groupSelectors/indexHashedGroup.go +++ /dev/null @@ -1,186 +0,0 @@ -package groupSelectors - -import ( - "bytes" - "encoding/binary" - "math/big" - - "github.com/ElrondNetwork/elrond-go/consensus" - "github.com/ElrondNetwork/elrond-go/hashing" -) - -type indexHashedGroupSelector struct { - hasher hashing.Hasher - eligibleList []consensus.Validator - expandedEligibleList []consensus.Validator - consensusGroupSize int -} - -// NewIndexHashedGroupSelector creates a new index hashed group selector -func NewIndexHashedGroupSelector(consensusGroupSize int, hasher hashing.Hasher) (*indexHashedGroupSelector, error) { - if hasher == nil || hasher.IsInterfaceNil() { - return nil, ErrNilHasher - } - - ihgs := &indexHashedGroupSelector{ - hasher: hasher, - eligibleList: make([]consensus.Validator, 0), - expandedEligibleList: make([]consensus.Validator, 0), - } - - err := ihgs.SetConsensusGroupSize(consensusGroupSize) - if err != nil { - return nil, err - } - - return ihgs, 
nil -} - -// LoadEligibleList loads the eligible list -func (ihgs *indexHashedGroupSelector) LoadEligibleList(eligibleList []consensus.Validator) error { - if eligibleList == nil { - return ErrNilInputSlice - } - - ihgs.eligibleList = make([]consensus.Validator, len(eligibleList)) - copy(ihgs.eligibleList, eligibleList) - return nil -} - -// ComputeValidatorsGroup will generate a list of validators based on the the eligible list, -// consensus group size and a randomness source -// Steps: -// 1. generate expanded eligible list by multiplying entries from eligible list according to stake and rating -> TODO -// 2. for each value in [0, consensusGroupSize), compute proposedindex = Hash( [index as string] CONCAT randomness) % len(eligible list) -// 3. if proposed index is already in the temp validator list, then proposedIndex++ (and then % len(eligible list) as to not -// exceed the maximum index value permitted by the validator list), and then recheck against temp validator list until -// the item at the new proposed index is not found in the list. This new proposed index will be called checked index -// 4. the item at the checked index is appended in the temp validator list -func (ihgs *indexHashedGroupSelector) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []consensus.Validator, err error) { - if len(ihgs.eligibleList) < ihgs.consensusGroupSize { - return nil, ErrSmallEligibleListSize - } - - if randomness == nil { - return nil, ErrNilRandomness - } - - ihgs.expandedEligibleList = ihgs.expandEligibleList() - - tempList := make([]consensus.Validator, 0) - - for startIdx := 0; startIdx < ihgs.consensusGroupSize; startIdx++ { - proposedIndex := ihgs.computeListIndex(startIdx, string(randomness)) - - checkedIndex := ihgs.checkIndex(proposedIndex, tempList) - tempList = append(tempList, ihgs.expandedEligibleList[checkedIndex]) - } - - return tempList, nil -} - -// GetSelectedPublicKeys returns the stringified public keys of the marked validators in the selection bitmap -// TODO: This function needs to be revised when the requirements are clarified -func (ihgs *indexHashedGroupSelector) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { - selectionLen := uint16(len(selection) * 8) // 8 selection bits in each byte - shardEligibleLen := uint16(len(ihgs.eligibleList)) - invalidSelection := selectionLen < shardEligibleLen - - if invalidSelection { - return nil, ErrEligibleSelectionMismatch - } - - publicKeys = make([]string, ihgs.consensusGroupSize) - cnt := 0 - - for i := uint16(0); i < shardEligibleLen; i++ { - isSelected := (selection[i/8] & (1 << (i % 8))) != 0 - - if !isSelected { - continue - } - - publicKeys[cnt] = string(ihgs.eligibleList[i].PubKey()) - cnt++ - - if cnt > ihgs.consensusGroupSize { - return nil, ErrEligibleTooManySelections - } - } - - if cnt < ihgs.consensusGroupSize { - return nil, ErrEligibleTooFewSelections - } - - return publicKeys, nil -} - -func (ihgs *indexHashedGroupSelector) expandEligibleList() []consensus.Validator { - //TODO implement an expand eligible list variant - return ihgs.eligibleList -} - -// computeListIndex computes a proposed index from expanded eligible list -func (ihgs *indexHashedGroupSelector) computeListIndex(currentIndex int, randomSource string) int { - buffCurrentIndex := make([]byte, 8) - binary.BigEndian.PutUint64(buffCurrentIndex, uint64(currentIndex)) - - indexHash := ihgs.hasher.Compute(string(buffCurrentIndex) + randomSource) - - computedLargeIndex := big.NewInt(0) - 
computedLargeIndex.SetBytes(indexHash) - - // computedListIndex = computedLargeIndex % len(expandedEligibleList) - computedListIndex := big.NewInt(0).Mod(computedLargeIndex, big.NewInt(int64(len(ihgs.expandedEligibleList)))).Int64() - return int(computedListIndex) -} - -// checkIndex returns a checked index starting from a proposed index -func (ihgs *indexHashedGroupSelector) checkIndex(proposedIndex int, selectedList []consensus.Validator) int { - - for { - v := ihgs.expandedEligibleList[proposedIndex] - - if ihgs.validatorIsInList(v, selectedList) { - proposedIndex++ - proposedIndex = proposedIndex % len(ihgs.expandedEligibleList) - continue - } - - return proposedIndex - } -} - -// validatorIsInList returns true if a validator has been found in provided list -func (ihgs *indexHashedGroupSelector) validatorIsInList(v consensus.Validator, list []consensus.Validator) bool { - for i := 0; i < len(list); i++ { - if bytes.Equal(v.PubKey(), list[i].PubKey()) { - return true - } - } - - return false -} - -// ConsensusGroupSize returns the consensus group size -func (ihgs *indexHashedGroupSelector) ConsensusGroupSize() int { - return ihgs.consensusGroupSize -} - -// SetConsensusGroupSize sets the consensus group size -func (ihgs *indexHashedGroupSelector) SetConsensusGroupSize(consensusGroupSize int) error { - if consensusGroupSize < 1 { - return ErrInvalidConsensusGroupSize - } - - ihgs.consensusGroupSize = consensusGroupSize - return nil -} - -// IsInterfaceNil returns true if there is no value under the interface -func (ihgs *indexHashedGroupSelector) IsInterfaceNil() bool { - if ihgs == nil { - return true - } - return false -} diff --git a/consensus/validators/groupSelectors/indexHashedGroup_test.go b/consensus/validators/groupSelectors/indexHashedGroup_test.go deleted file mode 100644 index dbc8d0fee89..00000000000 --- a/consensus/validators/groupSelectors/indexHashedGroup_test.go +++ /dev/null @@ -1,363 +0,0 @@ -package groupSelectors_test - -import ( - "encoding/binary" - "math/big" - "strconv" - "testing" - - "github.com/ElrondNetwork/elrond-go/consensus" - "github.com/ElrondNetwork/elrond-go/consensus/mock" - "github.com/ElrondNetwork/elrond-go/consensus/validators/groupSelectors" - "github.com/stretchr/testify/assert" -) - -func convertBigIntToBytes(value *big.Int) []byte { - return value.Bytes() -} - -func uint64ToBytes(value uint64) []byte { - buff := make([]byte, 8) - - binary.BigEndian.PutUint64(buff, value) - return buff -} - -//------- NewIndexHashedGroupSelector - -func TestNewIndexHashedGroupSelector_NilHasherShouldErr(t *testing.T) { - t.Parallel() - - ihgs, err := groupSelectors.NewIndexHashedGroupSelector(1, nil) - - assert.Nil(t, ihgs) - assert.Equal(t, groupSelectors.ErrNilHasher, err) -} - -func TestNewIndexHashedGroupSelector_InvalidConsensusGroupSizeShouldErr(t *testing.T) { - t.Parallel() - - ihgs, err := groupSelectors.NewIndexHashedGroupSelector(0, mock.HasherMock{}) - - assert.Nil(t, ihgs) - assert.Equal(t, groupSelectors.ErrInvalidConsensusGroupSize, err) -} - -func TestNewIndexHashedGroupSelector_OkValsShouldWork(t *testing.T) { - t.Parallel() - - ihgs, err := groupSelectors.NewIndexHashedGroupSelector(1, mock.HasherMock{}) - - assert.NotNil(t, ihgs) - assert.Nil(t, err) -} - -//------- LoadEligibleList - -func TestIndexHashedGroupSelector_LoadEligibleListNilListShouldErr(t *testing.T) { - t.Parallel() - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(10, mock.HasherMock{}) - - assert.Equal(t, groupSelectors.ErrNilInputSlice, ihgs.LoadEligibleList(nil)) 
-} - -func TestIndexHashedGroupSelector_OkValShouldWork(t *testing.T) { - t.Parallel() - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(10, mock.HasherMock{}) - - list := []consensus.Validator{ - mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), - } - - err := ihgs.LoadEligibleList(list) - assert.Nil(t, err) - assert.Equal(t, list, ihgs.EligibleList()) -} - -//------- ComputeValidatorsGroup - -func TestIndexHashedGroupSelector_ComputeValidatorsGroup0SizeShouldErr(t *testing.T) { - t.Parallel() - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(1, mock.HasherMock{}) - - list := make([]consensus.Validator, 0) - - list, err := ihgs.ComputeValidatorsGroup([]byte("randomness")) - - assert.Nil(t, list) - assert.Equal(t, groupSelectors.ErrSmallEligibleListSize, err) -} - -func TestIndexHashedGroupSelector_ComputeValidatorsGroupWrongSizeShouldErr(t *testing.T) { - t.Parallel() - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(10, mock.HasherMock{}) - - list := []consensus.Validator{ - mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), - } - - _ = ihgs.LoadEligibleList(list) - - list, err := ihgs.ComputeValidatorsGroup([]byte("randomness")) - - assert.Nil(t, list) - assert.Equal(t, groupSelectors.ErrSmallEligibleListSize, err) -} - -func TestIndexHashedGroupSelector_ComputeValidatorsGroupNilRandomnessShouldErr(t *testing.T) { - t.Parallel() - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(2, mock.HasherMock{}) - - list := []consensus.Validator{ - mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), - } - - _ = ihgs.LoadEligibleList(list) - - list2, err := ihgs.ComputeValidatorsGroup(nil) - - assert.Nil(t, list2) - assert.Equal(t, groupSelectors.ErrNilRandomness, err) -} - -//------- functionality tests - -func TestIndexHashedGroupSelector_ComputeValidatorsGroup1ValidatorShouldReturnSame(t *testing.T) { - t.Parallel() - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(1, mock.HasherMock{}) - - list := []consensus.Validator{ - mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), - } - - _ = ihgs.LoadEligibleList(list) - - list2, err := ihgs.ComputeValidatorsGroup([]byte("randomness")) - - assert.Nil(t, err) - assert.Equal(t, list, list2) -} - -func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2Validators(t *testing.T) { - t.Parallel() - - hasher := &mock.HasherStub{} - - randomness := "randomness" - - //this will return the list in order: - //element 0 will be first element - //element 1 will be the second - hasher.ComputeCalled = func(s string) []byte { - if string(uint64ToBytes(0))+randomness == s { - return convertBigIntToBytes(big.NewInt(0)) - } - - if string(uint64ToBytes(1))+randomness == s { - return convertBigIntToBytes(big.NewInt(1)) - } - - return nil - } - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(2, hasher) - - list := []consensus.Validator{ - mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), - } - - _ = ihgs.LoadEligibleList(list) - - list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) - - assert.Nil(t, err) - assert.Equal(t, list, list2) -} - -func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsRevertOrder(t *testing.T) { - t.Parallel() - - hasher := &mock.HasherStub{} - - randomness := "randomness" - - //this will return the list in reverse 
order: - //element 0 will be the second - //element 1 will be the first - hasher.ComputeCalled = func(s string) []byte { - if string(uint64ToBytes(0))+randomness == s { - return convertBigIntToBytes(big.NewInt(1)) - } - - if string(uint64ToBytes(1))+randomness == s { - return convertBigIntToBytes(big.NewInt(0)) - } - - return nil - } - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(2, hasher) - - validator0 := mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")) - validator1 := mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")) - - list := []consensus.Validator{ - validator0, - validator1, - } - - _ = ihgs.LoadEligibleList(list) - - list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) - - assert.Nil(t, err) - assert.Equal(t, validator0, list2[1]) - assert.Equal(t, validator1, list2[0]) -} - -func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsSameIndex(t *testing.T) { - t.Parallel() - - hasher := &mock.HasherStub{} - - randomness := "randomness" - - //this will return the list in order: - //element 0 will be the first - //element 1 will be the second as the same index is being returned and 0 is already in list - hasher.ComputeCalled = func(s string) []byte { - if string(uint64ToBytes(0))+randomness == s { - return convertBigIntToBytes(big.NewInt(0)) - } - - if string(uint64ToBytes(1))+randomness == s { - return convertBigIntToBytes(big.NewInt(0)) - } - - return nil - } - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(2, hasher) - - list := []consensus.Validator{ - mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0")), - mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1")), - } - - _ = ihgs.LoadEligibleList(list) - - list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) - - assert.Nil(t, err) - assert.Equal(t, list, list2) -} - -func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsShouldWork(t *testing.T) { - t.Parallel() - - hasher := &mock.HasherStub{} - - randomness := "randomness" - - //script: - // for index 0, hasher will return 11 which will translate to 1, so 1 is the first element - // for index 1, hasher will return 1 which will translate to 1, 1 is already picked, try the next, 2 is the second element - // for index 2, hasher will return 9 which will translate to 9, 9 is the 3-rd element - // for index 3, hasher will return 9 which will translate to 9, 9 is already picked, try the next one, 0 is the 4-th element - // for index 4, hasher will return 0 which will translate to 0, 0 is already picked, 1 is already picked, 2 is already picked, - // 3 is the 4-th element - // for index 5, hasher will return 9 which will translate to 9, so 9, 0, 1, 2, 3 are already picked, 4 is the 5-th element - - script := make(map[string]*big.Int) - script[string(uint64ToBytes(0))+randomness] = big.NewInt(11) //will translate to 1, add 1 - script[string(uint64ToBytes(1))+randomness] = big.NewInt(1) //will translate to 1, add 2 - script[string(uint64ToBytes(2))+randomness] = big.NewInt(9) //will translate to 9, add 9 - script[string(uint64ToBytes(3))+randomness] = big.NewInt(9) //will translate to 9, add 0 - script[string(uint64ToBytes(4))+randomness] = big.NewInt(0) //will translate to 0, add 3 - script[string(uint64ToBytes(5))+randomness] = big.NewInt(9) //will translate to 9, add 4 - - hasher.ComputeCalled = func(s string) []byte { - val, ok := script[s] - - if !ok { - assert.Fail(t, "should have not got here") - } - - return convertBigIntToBytes(val) - } - - ihgs, _ := 
groupSelectors.NewIndexHashedGroupSelector(6, hasher) - - validator0 := mock.NewValidatorMock(big.NewInt(1), 1, []byte("pk0")) - validator1 := mock.NewValidatorMock(big.NewInt(2), 2, []byte("pk1")) - validator2 := mock.NewValidatorMock(big.NewInt(3), 3, []byte("pk2")) - validator3 := mock.NewValidatorMock(big.NewInt(4), 4, []byte("pk3")) - validator4 := mock.NewValidatorMock(big.NewInt(5), 5, []byte("pk4")) - validator5 := mock.NewValidatorMock(big.NewInt(6), 6, []byte("pk5")) - validator6 := mock.NewValidatorMock(big.NewInt(7), 7, []byte("pk6")) - validator7 := mock.NewValidatorMock(big.NewInt(8), 8, []byte("pk7")) - validator8 := mock.NewValidatorMock(big.NewInt(9), 9, []byte("pk8")) - validator9 := mock.NewValidatorMock(big.NewInt(10), 10, []byte("pk9")) - - list := []consensus.Validator{ - validator0, - validator1, - validator2, - validator3, - validator4, - validator5, - validator6, - validator7, - validator8, - validator9, - } - - _ = ihgs.LoadEligibleList(list) - - list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness)) - - assert.Nil(t, err) - assert.Equal(t, 6, len(list2)) - //check order as described in script - assert.Equal(t, validator1, list2[0]) - assert.Equal(t, validator2, list2[1]) - assert.Equal(t, validator9, list2[2]) - assert.Equal(t, validator0, list2[3]) - assert.Equal(t, validator3, list2[4]) - assert.Equal(t, validator4, list2[5]) - -} - -func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing.B) { - consensusGroupSize := 21 - - ihgs, _ := groupSelectors.NewIndexHashedGroupSelector(consensusGroupSize, mock.HasherMock{}) - - list := make([]consensus.Validator, 0) - - //generate 400 validators - for i := 0; i < 400; i++ { - list = append(list, mock.NewValidatorMock(big.NewInt(0), 0, []byte("pk"+strconv.Itoa(i)))) - } - _ = ihgs.LoadEligibleList(list) - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - randomness := strconv.Itoa(i) - - list2, _ := ihgs.ComputeValidatorsGroup([]byte(randomness)) - - assert.Equal(b, consensusGroupSize, len(list2)) - } -} diff --git a/consensus/validators/validator_test.go b/consensus/validators/validator_test.go deleted file mode 100644 index 9f0cefe817c..00000000000 --- a/consensus/validators/validator_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package validators_test - -import ( - "math/big" - "testing" - - "github.com/ElrondNetwork/elrond-go/consensus/validators" - "github.com/stretchr/testify/assert" -) - -func TestValidator_NewValidatorShouldFailOnNilStake(t *testing.T) { - t.Parallel() - - validator, err := validators.NewValidator(nil, 0, []byte("pk1")) - - assert.Nil(t, validator) - assert.Equal(t, validators.ErrNilStake, err) -} - -func TestValidator_NewValidatorShouldFailOnNegativeStake(t *testing.T) { - t.Parallel() - - validator, err := validators.NewValidator(big.NewInt(-1), 0, []byte("pk1")) - - assert.Nil(t, validator) - assert.Equal(t, validators.ErrNegativeStake, err) -} - -func TestValidator_NewValidatorShouldFailOnNilPublickKey(t *testing.T) { - t.Parallel() - - validator, err := validators.NewValidator(big.NewInt(0), 0, nil) - - assert.Nil(t, validator) - assert.Equal(t, validators.ErrNilPubKey, err) -} - -func TestValidator_NewValidatorShouldWork(t *testing.T) { - t.Parallel() - - validator, err := validators.NewValidator(big.NewInt(0), 0, []byte("pk1")) - - assert.NotNil(t, validator) - assert.Nil(t, err) -} - -func TestValidator_StakeShouldWork(t *testing.T) { - t.Parallel() - - validator, _ := validators.NewValidator(big.NewInt(1), 0, []byte("pk1")) - - assert.Equal(t, big.NewInt(1), 
validator.Stake()) -} - -func TestValidator_PubKeyShouldWork(t *testing.T) { - t.Parallel() - - validator, _ := validators.NewValidator(big.NewInt(0), 0, []byte("pk1")) - - assert.Equal(t, []byte("pk1"), validator.PubKey()) -} diff --git a/core/computers.go b/core/computers.go index d75351537cc..9552f3888b5 100644 --- a/core/computers.go +++ b/core/computers.go @@ -1,9 +1,49 @@ package core -// Max returns the maximum number between two given -func Max(a uint32, b uint32) uint32 { +// MaxInt32 returns the maximum of two given numbers +func MaxInt32(a int32, b int32) int32 { if a > b { return a } return b } + +// MinInt32 returns the minimum of two given numbers +func MinInt32(a int32, b int32) int32 { + if a < b { + return a + } + return b +} + +// MaxUint32 returns the maximum of two given numbers +func MaxUint32(a uint32, b uint32) uint32 { + if a > b { + return a + } + return b +} + +// MinUint32 returns the minimum of two given numbers +func MinUint32(a uint32, b uint32) uint32 { + if a < b { + return a + } + return b +} + +// MaxUint64 returns the maximum of two given numbers +func MaxUint64(a uint64, b uint64) uint64 { + if a > b { + return a + } + return b +} + +// MinUint64 returns the minimum of two given numbers +func MinUint64(a uint64, b uint64) uint64 { + if a < b { + return a + } + return b +} diff --git a/core/computers_test.go b/core/computers_test.go index 0d71dffe362..397f3382879 100644 --- a/core/computers_test.go +++ b/core/computers_test.go @@ -7,14 +7,74 @@ import ( "github.com/stretchr/testify/assert" ) -func TestMaxShouldReturnA(t *testing.T) { +func TestMaxInt32ShouldReturnA(t *testing.T) { + a := int32(-1) + b := int32(-2) + assert.Equal(t, a, core.MaxInt32(a, b)) +} + +func TestMaxInt32ShouldReturnB(t *testing.T) { + a := int32(-2) + b := int32(-1) + assert.Equal(t, b, core.MaxInt32(a, b)) +} + +func TestMinInt32ShouldReturnB(t *testing.T) { + a := int32(-1) + b := int32(-2) + assert.Equal(t, b, core.MinInt32(a, b)) +} + +func TestMinInt32ShouldReturnA(t *testing.T) { + a := int32(-2) + b := int32(-1) + assert.Equal(t, a, core.MinInt32(a, b)) +} + +func TestMaxUint32ShouldReturnA(t *testing.T) { + a := uint32(11) + b := uint32(10) + assert.Equal(t, a, core.MaxUint32(a, b)) +} + +func TestMaxUint32ShouldReturnB(t *testing.T) { + a := uint32(10) + b := uint32(11) + assert.Equal(t, b, core.MaxUint32(a, b)) +} + +func TestMinUint32ShouldReturnB(t *testing.T) { a := uint32(11) b := uint32(10) - assert.Equal(t, a, core.Max(a, b)) + assert.Equal(t, b, core.MinUint32(a, b)) } -func TestMaxShouldReturnB(t *testing.T) { +func TestMinUint32ShouldReturnA(t *testing.T) { a := uint32(10) b := uint32(11) - assert.Equal(t, b, core.Max(a, b)) + assert.Equal(t, a, core.MinUint32(a, b)) +} + +func TestMaxUint64ShouldReturnA(t *testing.T) { + a := uint64(11) + b := uint64(10) + assert.Equal(t, a, core.MaxUint64(a, b)) +} + +func TestMaxUint64ShouldReturnB(t *testing.T) { + a := uint64(10) + b := uint64(11) + assert.Equal(t, b, core.MaxUint64(a, b)) +} + +func TestMinUint64ShouldReturnB(t *testing.T) { + a := uint64(11) + b := uint64(10) + assert.Equal(t, b, core.MinUint64(a, b)) +} + +func TestMinUint64ShouldReturnA(t *testing.T) { + a := uint64(10) + b := uint64(11) + assert.Equal(t, a, core.MinUint64(a, b)) } diff --git a/core/constants.go b/core/constants.go index a0d6eb352b8..e88ebd0aab9 100644 --- a/core/constants.go +++ b/core/constants.go @@ -152,3 +152,9 @@ const MetricNumShardHeadersProcessed = "erd_num_shard_headers_processed" // MetricNumTimesInForkChoice is the metric 
that counts how many times a node was in fork choice const MetricNumTimesInForkChoice = "erd_fork_choice_count" + +// MaxMiniBlocksInBlock specifies the max number of mini blocks which can be added in one block +const MaxMiniBlocksInBlock = 100 + +// MetricHighestFinalBlockInShard is the metric that stores the nonce of the highest block notarized by the metachain for the current shard +const MetricHighestFinalBlockInShard = "erd_highest_notarized_block_by_metachain_for_current_shard" diff --git a/core/errors.go b/core/errors.go index 87add20851b..dbc2989a41a 100644 --- a/core/errors.go +++ b/core/errors.go @@ -39,3 +39,6 @@ var ErrEmptyFile = errors.New("empty file provided") // ErrInvalidIndex signals that an invalid private key index has been provided var ErrInvalidIndex = errors.New("invalid private key index") + +// ErrNotPositiveValue signals that a 0 or negative value has been provided +var ErrNotPositiveValue = errors.New("the provided value is not positive") diff --git a/core/indexer/data.go b/core/indexer/data.go index 21917298663..4041e166202 100644 --- a/core/indexer/data.go +++ b/core/indexer/data.go @@ -13,7 +13,8 @@ type Transaction struct { MBHash string `json:"miniBlockHash"` BlockHash string `json:"blockHash"` Nonce uint64 `json:"nonce"` - Value *big.Int `json:"value"` + Round uint64 `json:"round"` + Value string `json:"value"` Receiver string `json:"receiver"` Sender string `json:"sender"` ReceiverShard uint32 `json:"receiverShard"` @@ -31,10 +32,11 @@ type Transaction struct { // plus some extra information for ease of search and filter type Block struct { Nonce uint64 `json:"nonce"` + Round uint64 `json:"round"` ShardID uint32 `json:"shardId"` Hash string `json:"hash"` - Proposer string `json:"proposer"` - Validators []string `json:"validators"` + Proposer uint64 `json:"proposer"` + Validators []uint64 `json:"validators"` PubKeyBitmap string `json:"pubKeyBitmap"` Size int64 `json:"size"` Timestamp time.Duration `json:"timestamp"` @@ -43,6 +45,20 @@ type Block struct { PrevHash string `json:"prevHash"` } +// ValidatorsPublicKeys is a structure containing the validators' public keys of a shard +type ValidatorsPublicKeys struct { + PublicKeys []string `json:"publicKeys"` +} + +// RoundInfo is a structure containing the round index, the signers indexes, the shard id and the timestamp of a round +type RoundInfo struct { + Index uint64 `json:"-"` + SignersIndexes []uint64 `json:"signersIndexes"` + BlockWasProposed bool `json:"blockWasProposed"` + ShardId uint32 `json:"shardId"` + Timestamp time.Duration `json:"timestamp"` +} + // TPS is a structure containing all the fields that need to // be saved for a shard statistic in the database type TPS struct { diff --git a/core/indexer/elasticsearch.go b/core/indexer/elasticsearch.go index a6c126cb2cb..264bd4aa7ca 100644 --- a/core/indexer/elasticsearch.go +++ b/core/indexer/elasticsearch.go @@ -8,6 +8,7 @@ import ( "io" "math/big" "net/http" + "strconv" "strings" "time" @@ -16,6 +17,7 @@ import ( "github.com/ElrondNetwork/elrond-go/core/statistics" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/hashing" @@ -30,6 +32,8 @@ const txBulkSize = 1000 const txIndex = "transactions" const blockIndex = "blocks" const tpsIndex = "tps" +const validatorsIndex = "validators" +const roundIndex = "rounds" const metachainTpsDocID = "meta" const shardTpsDocIDPrefix = "shard" @@ 
-49,6 +53,7 @@ type elasticIndexer struct { hasher hashing.Hasher logger *logger.Logger options *Options + isNilIndexer bool } // NewElasticIndexer creates a new elasticIndexer where the server listens on the url, authentication for the server is @@ -92,6 +97,7 @@ func NewElasticIndexer( hasher, logger, options, + false, } err = indexer.checkAndCreateIndex(blockIndex, timestampMapping()) @@ -109,6 +115,16 @@ func NewElasticIndexer( return nil, err } + err = indexer.checkAndCreateIndex(validatorsIndex, nil) + if err != nil { + return nil, err + } + + err = indexer.checkAndCreateIndex(roundIndex, timestampMapping()) + if err != nil { + return nil, err + } + return indexer, nil } @@ -197,7 +213,9 @@ func (ei *elasticIndexer) createIndex(index string, body io.Reader) error { func (ei *elasticIndexer) SaveBlock( bodyHandler data.BodyHandler, headerhandler data.HeaderHandler, - txPool map[string]data.TransactionHandler) { + txPool map[string]data.TransactionHandler, + signersIndexes []uint64, +) { if headerhandler == nil || headerhandler.IsInterfaceNil() { ei.logger.Warn(ErrNoHeader.Error()) @@ -210,7 +228,7 @@ func (ei *elasticIndexer) SaveBlock( return } - go ei.saveHeader(headerhandler) + go ei.saveHeader(headerhandler, signersIndexes) if len(body) == 0 { ei.logger.Warn(ErrNoMiniblocks.Error()) @@ -222,7 +240,100 @@ func (ei *elasticIndexer) SaveBlock( } } -func (ei *elasticIndexer) getSerializedElasticBlockAndHeaderHash(header data.HeaderHandler) ([]byte, []byte) { +// SaveMetaBlock will index a meta block in elastic search +func (ei *elasticIndexer) SaveMetaBlock(header data.HeaderHandler, signersIndexes []uint64) { + if header == nil || header.IsInterfaceNil() { + ei.logger.Warn(ErrNoHeader.Error()) + return + } + + go ei.saveHeader(header, signersIndexes) +} + +// SaveRoundInfo will save data about a round on elastic search +func (ei *elasticIndexer) SaveRoundInfo(roundInfo RoundInfo) { + var buff bytes.Buffer + + marshalizedRoundInfo, err := ei.marshalizer.Marshal(roundInfo) + if err != nil { + ei.logger.Warn("could not marshal round info") + return + } + + buff.Grow(len(marshalizedRoundInfo)) + buff.Write(marshalizedRoundInfo) + + req := esapi.IndexRequest{ + Index: roundIndex, + DocumentID: strconv.FormatUint(uint64(roundInfo.ShardId), 10) + "_" + strconv.FormatUint(roundInfo.Index, 10), + Body: bytes.NewReader(buff.Bytes()), + Refresh: "true", + } + + res, err := req.Do(context.Background(), ei.db) + if err != nil { + ei.logger.Warn(fmt.Sprintf("Could not index round information: %s", err)) + return + } + + defer closeESResponseBody(res) + + if res.IsError() { + ei.logger.Warn(res.String()) + } +} + +// SaveValidatorsPubKeys will send all validators' public keys to elastic search +func (ei *elasticIndexer) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) { + valPubKeys := make(map[uint32][]string, 0) + for shardId, shardPubKeys := range validatorsPubKeys { + for _, pubKey := range shardPubKeys { + valPubKeys[shardId] = append(valPubKeys[shardId], hex.EncodeToString(pubKey)) + } + + go ei.saveShardValidatorsPubKeys(shardId, valPubKeys[shardId]) + } +} + +// IsNilIndexer will return a bool value that signals if the indexer's implementation is a NilIndexer +func (ei *elasticIndexer) IsNilIndexer() bool { + return ei.isNilIndexer +} + +func (ei *elasticIndexer) saveShardValidatorsPubKeys(shardId uint32, shardValidatorsPubKeys []string) { + var buff bytes.Buffer + + shardValPubKeys := ValidatorsPublicKeys{PublicKeys: shardValidatorsPubKeys} + marshalizedValidatorPubKeys, 
err := ei.marshalizer.Marshal(shardValPubKeys) + if err != nil { + ei.logger.Warn("could not marshal validators public keys") + return + } + + buff.Grow(len(marshalizedValidatorPubKeys)) + buff.Write(marshalizedValidatorPubKeys) + + req := esapi.IndexRequest{ + Index: validatorsIndex, + DocumentID: strconv.FormatUint(uint64(shardId), 10), + Body: bytes.NewReader(buff.Bytes()), + Refresh: "true", + } + + res, err := req.Do(context.Background(), ei.db) + if err != nil { + ei.logger.Warn(fmt.Sprintf("Could not index validators public keys: %s", err)) + return + } + + defer closeESResponseBody(res) + + if res.IsError() { + ei.logger.Warn(res.String()) + } +} + +func (ei *elasticIndexer) getSerializedElasticBlockAndHeaderHash(header data.HeaderHandler, signersIndexes []uint64) ([]byte, []byte) { h, err := ei.marshalizer.Marshal(header) if err != nil { ei.logger.Warn("could not marshal header") @@ -231,12 +342,12 @@ func (ei *elasticIndexer) getSerializedElasticBlockAndHeaderHash(header data.Hea headerHash := ei.hasher.Compute(string(h)) elasticBlock := Block{ - Nonce: header.GetNonce(), - ShardID: header.GetShardID(), - Hash: hex.EncodeToString(headerHash), - // TODO: We should add functionality for proposer and validators - Proposer: hex.EncodeToString([]byte("mock proposer")), - //Validators: "mock validators", + Nonce: header.GetNonce(), + Round: header.GetRound(), + ShardID: header.GetShardID(), + Hash: hex.EncodeToString(headerHash), + Proposer: signersIndexes[0], + Validators: signersIndexes, PubKeyBitmap: hex.EncodeToString(header.GetPubKeysBitmap()), Size: int64(len(h)), Timestamp: time.Duration(header.GetTimeStamp()), @@ -254,10 +365,10 @@ func (ei *elasticIndexer) getSerializedElasticBlockAndHeaderHash(header data.Hea return serializedBlock, headerHash } -func (ei *elasticIndexer) saveHeader(header data.HeaderHandler) { +func (ei *elasticIndexer) saveHeader(header data.HeaderHandler, signersIndexes []uint64) { var buff bytes.Buffer - serializedBlock, headerHash := ei.getSerializedElasticBlockAndHeaderHash(header) + serializedBlock, headerHash := ei.getSerializedElasticBlockAndHeaderHash(header, signersIndexes) buff.Grow(len(serializedBlock)) buff.Write(serializedBlock) @@ -505,6 +616,11 @@ func getTransactionByType( return buildSmartContractResult(currentSc, txHash, mbHash, blockHash, mb, header) } + currentReward, ok := tx.(*rewardTx.RewardTx) + if ok && currentReward != nil { + return buildRewardTransaction(currentReward, txHash, mbHash, blockHash, mb, header) + } + return nil } @@ -522,7 +638,8 @@ func buildTransaction( MBHash: hex.EncodeToString(mbHash), BlockHash: hex.EncodeToString(blockHash), Nonce: tx.Nonce, - Value: tx.Value, + Round: header.GetRound(), + Value: tx.Value.String(), Receiver: hex.EncodeToString(tx.RcvAddr), Sender: hex.EncodeToString(tx.SndAddr), ReceiverShard: mb.ReceiverShardID, @@ -549,7 +666,8 @@ func buildSmartContractResult( MBHash: hex.EncodeToString(mbHash), BlockHash: hex.EncodeToString(blockHash), Nonce: scr.Nonce, - Value: scr.Value, + Round: header.GetRound(), + Value: scr.Value.String(), Receiver: hex.EncodeToString(scr.RcvAddr), Sender: hex.EncodeToString(scr.SndAddr), ReceiverShard: mb.ReceiverShardID, @@ -562,3 +680,34 @@ func buildSmartContractResult( Status: "Success", } } + +func buildRewardTransaction( + rTx *rewardTx.RewardTx, + txHash []byte, + mbHash []byte, + blockHash []byte, + mb *block.MiniBlock, + header data.HeaderHandler, +) *Transaction { + + shardIdStr := fmt.Sprintf("Shard%d", rTx.ShardId) + + return &Transaction{ + Hash: 
hex.EncodeToString(txHash), + MBHash: hex.EncodeToString(mbHash), + BlockHash: hex.EncodeToString(blockHash), + Nonce: 0, + Round: rTx.Round, + Value: rTx.Value.String(), + Receiver: hex.EncodeToString(rTx.RcvAddr), + Sender: shardIdStr, + ReceiverShard: mb.ReceiverShardID, + SenderShard: mb.SenderShardID, + GasPrice: 0, + GasLimit: 0, + Data: "", + Signature: "", + Timestamp: time.Duration(header.GetTimeStamp()), + Status: "Success", + } +} diff --git a/core/indexer/elasticsearch_test.go b/core/indexer/elasticsearch_test.go index 7e78c0f64c4..a16007906b8 100644 --- a/core/indexer/elasticsearch_test.go +++ b/core/indexer/elasticsearch_test.go @@ -267,8 +267,9 @@ func TestNewElasticIndexerIncorrectUrl(t *testing.T) { func TestElasticIndexer_getSerializedElasticBlockAndHeaderHash(t *testing.T) { ei := indexer.NewTestElasticIndexer(url, username, password, shardCoordinator, marshalizer, hasher, log, &indexer.Options{}) header := newTestBlockHeader() + signersIndexes := []uint64{0, 1, 2, 3} - serializedBlock, headerHash := ei.GetSerializedElasticBlockAndHeaderHash(header) + serializedBlock, headerHash := ei.GetSerializedElasticBlockAndHeaderHash(header, signersIndexes) h, _ := marshalizer.Marshal(header) expectedHeaderHash := hasher.Compute(string(h)) @@ -276,9 +277,11 @@ func TestElasticIndexer_getSerializedElasticBlockAndHeaderHash(t *testing.T) { elasticBlock := indexer.Block{ Nonce: header.Nonce, + Round: header.Round, ShardID: header.ShardId, Hash: hex.EncodeToString(headerHash), - Proposer: hex.EncodeToString([]byte("mock proposer")), + Proposer: signersIndexes[0], + Validators: signersIndexes, PubKeyBitmap: hex.EncodeToString(header.PubKeysBitmap), Size: int64(len(h)), Timestamp: time.Duration(header.TimeStamp), diff --git a/core/indexer/export_test.go b/core/indexer/export_test.go index cd9913c9e24..89808723eac 100644 --- a/core/indexer/export_test.go +++ b/core/indexer/export_test.go @@ -37,13 +37,13 @@ func NewTestElasticIndexer( es, _ := elasticsearch.NewClient(cfg) indexer := elasticIndexer{es, shardCoordinator, - marshalizer, hasher, logger, options} + marshalizer, hasher, logger, options, false} return ElasticIndexer{indexer} } -func (ei *ElasticIndexer) GetSerializedElasticBlockAndHeaderHash(header data.HeaderHandler) ([]byte, []byte) { - return ei.getSerializedElasticBlockAndHeaderHash(header) +func (ei *ElasticIndexer) GetSerializedElasticBlockAndHeaderHash(header data.HeaderHandler, signersIndexes []uint64) ([]byte, []byte) { + return ei.getSerializedElasticBlockAndHeaderHash(header, signersIndexes) } func (ei *ElasticIndexer) BuildTransactionBulks( diff --git a/core/indexer/interface.go b/core/indexer/interface.go index af9bb2afde2..94ca33be898 100644 --- a/core/indexer/interface.go +++ b/core/indexer/interface.go @@ -8,7 +8,11 @@ import ( // Indexer is an interface for saving node specific data to other storage. // This could be an elasticsearch index, a MySql database or any other external services. 
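// SaveBlock receives, besides the body, header and transaction pool, the signers indexes of the consensus group; SaveMetaBlock, SaveRoundInfo and SaveValidatorsPubKeys index metachain headers, per-round information and the validators' public keys, while IsNilIndexer reports whether the implementation is the no-op NilIndexer used when no real indexer is available.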
type Indexer interface { - SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler) + SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler, signersIndexes []uint64) + SaveMetaBlock(header data.HeaderHandler, signersIndexes []uint64) + SaveRoundInfo(roundInfo RoundInfo) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) + SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) IsInterfaceNil() bool + IsNilIndexer() bool } diff --git a/core/indexer/nilIndexer.go b/core/indexer/nilIndexer.go new file mode 100644 index 00000000000..de2ced5d168 --- /dev/null +++ b/core/indexer/nilIndexer.go @@ -0,0 +1,52 @@ +package indexer + +import ( + "github.com/ElrondNetwork/elrond-go/core/statistics" + "github.com/ElrondNetwork/elrond-go/data" +) + +// NilIndexer will be used when an Indexer is required, but another one isn't necessary or available +type NilIndexer struct { +} + +// NewNilIndexer will return a Nil indexer +func NewNilIndexer() *NilIndexer { + return new(NilIndexer) +} + +// SaveBlock will do nothing +func (ni *NilIndexer) SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler, signersIndexes []uint64) { + return +} + +func (im *NilIndexer) SaveMetaBlock(header data.HeaderHandler, signersIndexes []uint64) { + return +} + +// SaveRoundInfo will do nothing +func (ni *NilIndexer) SaveRoundInfo(info RoundInfo) { + return +} + +// UpdateTPS will do nothing +func (ni *NilIndexer) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { + return +} + +// SaveValidatorsPubKeys will do nothing +func (ni *NilIndexer) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) { + return +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ni *NilIndexer) IsInterfaceNil() bool { + if ni == nil { + return true + } + return false +} + +// IsNilIndexer will return a bool value that signals if the indexer's implementation is a NilIndexer +func (ni *NilIndexer) IsNilIndexer() bool { + return true +} diff --git a/core/logger/redirectStderrDarwin.go b/core/logger/redirectStderrDarwin.go new file mode 100644 index 00000000000..e7fba6f64a4 --- /dev/null +++ b/core/logger/redirectStderrDarwin.go @@ -0,0 +1,18 @@ +//+build darwin + +package logger + +import ( + "os" + "syscall" +) + +// redirectStderr redirects the output of the stderr to the file passed in +func redirectStderr(f *os.File) error { + err := syscall.Dup2(int(f.Fd()), int(os.Stderr.Fd())) + if err != nil { + return err + } + + return nil +} diff --git a/core/logger/redirectStderrLinux.go b/core/logger/redirectStderrLinux.go index 7aed514fd69..7316f8c3827 100644 --- a/core/logger/redirectStderrLinux.go +++ b/core/logger/redirectStderrLinux.go @@ -1,4 +1,4 @@ -//+build linux darwin +//+build linux package logger diff --git a/core/mock/indexerMock.go b/core/mock/indexerMock.go index 94951d07fa4..d89a1ac4085 100644 --- a/core/mock/indexerMock.go +++ b/core/mock/indexerMock.go @@ -1,6 +1,7 @@ package mock import ( + "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/core/statistics" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" @@ -11,14 +12,26 @@ type IndexerMock struct { SaveBlockCalled func(body block.Body, header *block.Header) } -func (im *IndexerMock) SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler) { +func (im *IndexerMock) SaveBlock(body 
data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler, signersIndexes []uint64) { panic("implement me") } +func (im *IndexerMock) SaveMetaBlock(header data.HeaderHandler, signersIndexes []uint64) { + return +} + func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { panic("implement me") } +func (im *IndexerMock) SaveRoundInfo(roundInfo indexer.RoundInfo) { + panic("implement me") +} + +func (im *IndexerMock) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) { + panic("implement me") +} + // IsInterfaceNil returns true if there is no value under the interface func (im *IndexerMock) IsInterfaceNil() bool { if im == nil { @@ -26,3 +39,7 @@ func (im *IndexerMock) IsInterfaceNil() bool { } return false } + +func (im *IndexerMock) IsNilIndexer() bool { + return false +} diff --git a/core/throttler/numGoRoutineThrottler.go b/core/throttler/numGoRoutineThrottler.go new file mode 100644 index 00000000000..c72d8d06066 --- /dev/null +++ b/core/throttler/numGoRoutineThrottler.go @@ -0,0 +1,49 @@ +package throttler + +import ( + "sync/atomic" + + "github.com/ElrondNetwork/elrond-go/core" +) + +// NumGoRoutineThrottler can limit the number of go routines launched +type NumGoRoutineThrottler struct { + max int32 + counter int32 +} + +// NewNumGoRoutineThrottler creates a new num go routine throttler instance +func NewNumGoRoutineThrottler(max int32) (*NumGoRoutineThrottler, error) { + if max <= 0 { + return nil, core.ErrNotPositiveValue + } + + return &NumGoRoutineThrottler{ + max: max, + }, nil +} + +// CanProcess returns true if current counter is less than max +func (ngrt *NumGoRoutineThrottler) CanProcess() bool { + valCounter := atomic.LoadInt32(&ngrt.counter) + + return valCounter < ngrt.max +} + +// StartProcessing will increment current counter +func (ngrt *NumGoRoutineThrottler) StartProcessing() { + atomic.AddInt32(&ngrt.counter, 1) +} + +// EndProcessing will decrement current counter +func (ngrt *NumGoRoutineThrottler) EndProcessing() { + atomic.AddInt32(&ngrt.counter, -1) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ngrt *NumGoRoutineThrottler) IsInterfaceNil() bool { + if ngrt == nil { + return true + } + return false +} diff --git a/core/throttler/numGoRoutineThrottler_test.go b/core/throttler/numGoRoutineThrottler_test.go new file mode 100644 index 00000000000..ebd0ac6d606 --- /dev/null +++ b/core/throttler/numGoRoutineThrottler_test.go @@ -0,0 +1,93 @@ +package throttler_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/throttler" + "github.com/stretchr/testify/assert" +) + +func TestNewNumGoRoutineThrottler_WithNegativeShouldError(t *testing.T) { + t.Parallel() + + nt, err := throttler.NewNumGoRoutineThrottler(-1) + + assert.Nil(t, nt) + assert.Equal(t, core.ErrNotPositiveValue, err) +} + +func TestNewNumGoRoutineThrottler_WithZeroShouldError(t *testing.T) { + t.Parallel() + + nt, err := throttler.NewNumGoRoutineThrottler(0) + + assert.Nil(t, nt) + assert.Equal(t, core.ErrNotPositiveValue, err) +} + +func TestNewNumGoRoutineThrottler_ShouldWork(t *testing.T) { + t.Parallel() + + nt, err := throttler.NewNumGoRoutineThrottler(1) + + assert.NotNil(t, nt) + assert.Nil(t, err) +} + +func TestNumGoRoutineThrottler_CanProcessMessageWithZeroCounter(t *testing.T) { + t.Parallel() + + nt, _ := throttler.NewNumGoRoutineThrottler(1) + + assert.True(t, nt.CanProcess()) +} + +func 
TestNumGoRoutineThrottler_CanProcessMessageCounterEqualsMax(t *testing.T) { + t.Parallel() + + nt, _ := throttler.NewNumGoRoutineThrottler(1) + nt.StartProcessing() + + assert.False(t, nt.CanProcess()) +} + +func TestNumGoRoutineThrottler_CanProcessMessageCounterIsMaxLessThanOne(t *testing.T) { + t.Parallel() + + max := int32(45) + nt, _ := throttler.NewNumGoRoutineThrottler(max) + + for i := int32(0); i < max-1; i++ { + nt.StartProcessing() + } + + assert.True(t, nt.CanProcess()) +} + +func TestNumGoRoutineThrottler_CanProcessMessageCounterIsMax(t *testing.T) { + t.Parallel() + + max := int32(45) + nt, _ := throttler.NewNumGoRoutineThrottler(max) + + for i := int32(0); i < max; i++ { + nt.StartProcessing() + } + + assert.False(t, nt.CanProcess()) +} + +func TestNumGoRoutineThrottler_CanProcessMessageCounterIsMaxLessOneFromEndProcessMessage(t *testing.T) { + t.Parallel() + + max := int32(45) + nt, _ := throttler.NewNumGoRoutineThrottler(max) + + for i := int32(0); i < max; i++ { + nt.StartProcessing() + } + nt.EndProcessing() + + assert.True(t, nt.CanProcess()) +} diff --git a/data/address/specialAddresses.go b/data/address/specialAddresses.go new file mode 100644 index 00000000000..db5df6a5a28 --- /dev/null +++ b/data/address/specialAddresses.go @@ -0,0 +1,168 @@ +package address + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type specialAddresses struct { + shardConsensusData *data.ConsensusRewardData + metaConsensusData []*data.ConsensusRewardData + elrondAddress []byte + burnAddress []byte + + adrConv state.AddressConverter + shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator +} + +// NewSpecialAddressHolder creates a special address holder +func NewSpecialAddressHolder( + elrondAddress []byte, + burnAddress []byte, + adrConv state.AddressConverter, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, +) (*specialAddresses, error) { + if elrondAddress == nil { + return nil, data.ErrNilElrondAddress + } + if burnAddress == nil { + return nil, data.ErrNilBurnAddress + } + if adrConv == nil || adrConv.IsInterfaceNil() { + return nil, data.ErrNilAddressConverter + } + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, data.ErrNilShardCoordinator + } + if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { + return nil, data.ErrNilNodesCoordinator + } + + sp := &specialAddresses{ + elrondAddress: elrondAddress, + burnAddress: burnAddress, + adrConv: adrConv, + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + metaConsensusData: make([]*data.ConsensusRewardData, 0), + } + + return sp, nil +} + +// SetShardConsensusData - sets the reward addresses for the current consensus group +func (sp *specialAddresses) SetShardConsensusData(randomness []byte, round uint64, epoch uint32, shardID uint32) error { + // give transaction coordinator the consensus group validators addresses where to send the rewards. 
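	// Descriptive note (added for clarity): the nodes coordinator is asked for the reward
	// addresses of the consensus group selected for this randomness, round and shard; they are
	// stored in consensus order, and the first returned address is what LeaderAddress() reports.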
+ consensusAddresses, err := sp.nodesCoordinator.GetValidatorsRewardsAddresses( + randomness, round, shardID, + ) + if err != nil { + return err + } + + sp.shardConsensusData = &data.ConsensusRewardData{ + Round: round, + Epoch: epoch, + Addresses: consensusAddresses, + } + + return nil +} + +// SetElrondCommunityAddress sets elrond address +func (sp *specialAddresses) SetElrondCommunityAddress(elrond []byte) { + sp.elrondAddress = elrond +} + +// ElrondCommunityAddress provides elrond address +func (sp *specialAddresses) ElrondCommunityAddress() []byte { + return sp.elrondAddress +} + +// BurnAddress provides burn address +func (sp *specialAddresses) BurnAddress() []byte { + return sp.burnAddress +} + +// ConsensusShardRewardData provides the consensus data required for generating the rewards for shard nodes +func (sp *specialAddresses) ConsensusShardRewardData() *data.ConsensusRewardData { + return sp.shardConsensusData +} + +// SetMetaConsensusData sets the rewards addresses for the metachain nodes +func (sp *specialAddresses) SetMetaConsensusData(randomness []byte, round uint64, epoch uint32) error { + rewardAddresses, err := sp.nodesCoordinator.GetValidatorsRewardsAddresses( + randomness, + round, + sharding.MetachainShardId, + ) + if err != nil { + return err + } + + sp.metaConsensusData = append(sp.metaConsensusData, &data.ConsensusRewardData{ + Round: round, + Epoch: epoch, + Addresses: rewardAddresses, + }) + + return nil +} + +// ClearMetaConsensusData clears the previously set addresses for rewarding metachain nodes +func (sp *specialAddresses) ClearMetaConsensusData() { + sp.metaConsensusData = make([]*data.ConsensusRewardData, 0) +} + +// ConsensusMetaRewardData provides the consensus data required for generating the rewards for metachain nodes +func (sp *specialAddresses) ConsensusMetaRewardData() []*data.ConsensusRewardData { + return sp.metaConsensusData +} + +// LeaderAddress provides leader address +func (sp *specialAddresses) LeaderAddress() []byte { + if sp.shardConsensusData == nil || len(sp.shardConsensusData.Addresses) == 0 { + return nil + } + + return []byte(sp.shardConsensusData.Addresses[0]) +} + +// Round returns the round for the current block +func (sp *specialAddresses) Round() uint64 { + if sp.shardConsensusData == nil { + return 0 + } + + return sp.shardConsensusData.Round +} + +// Epoch returns the epoch for the current block +func (sp *specialAddresses) Epoch() uint32 { + if sp.shardConsensusData == nil { + return 0 + } + + return sp.shardConsensusData.Epoch +} + +// ShardIdForAddress calculates shard id for address +func (sp *specialAddresses) ShardIdForAddress(pubKey []byte) (uint32, error) { + convAdr, err := sp.adrConv.CreateAddressFromPublicKeyBytes(pubKey) + if err != nil { + return 0, err + } + + return sp.shardCoordinator.ComputeId(convAdr), nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sp *specialAddresses) IsInterfaceNil() bool { + if sp == nil { + return true + } + return false +} diff --git a/data/address/specialAddresses_test.go b/data/address/specialAddresses_test.go new file mode 100644 index 00000000000..6fbf64f894a --- /dev/null +++ b/data/address/specialAddresses_test.go @@ -0,0 +1,307 @@ +package address + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/mock" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + 
"github.com/stretchr/testify/assert" +) + +type Args struct { + ElrondCommunityAddress []byte + BurnAddress []byte + AddrConv state.AddressConverter + ShardCoordinator sharding.Coordinator + NodesCoordiator sharding.NodesCoordinator +} + +func initDefaultArgs() *Args { + args := &Args{ + ElrondCommunityAddress: []byte("community"), + BurnAddress: []byte("burn"), + AddrConv: &mock.AddressConverterMock{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(1), + NodesCoordiator: mock.NewNodesCoordinatorMock(), + } + + return args +} + +func createSpecialAddressFromArgs(args *Args) (process.SpecialAddressHandler, error) { + addr, err := NewSpecialAddressHolder( + args.ElrondCommunityAddress, + args.BurnAddress, + args.AddrConv, + args.ShardCoordinator, + args.NodesCoordiator, + ) + return addr, err +} + +func createDefaultSpecialAddress() process.SpecialAddressHandler { + args := initDefaultArgs() + addr, _ := createSpecialAddressFromArgs(args) + + return addr +} + +func TestNewSpecialAddressHolderNilCommunityAddressShouldErr(t *testing.T) { + t.Parallel() + + args := initDefaultArgs() + args.ElrondCommunityAddress = nil + addr, err := createSpecialAddressFromArgs(args) + + assert.Nil(t, addr) + assert.Equal(t, data.ErrNilElrondAddress, err) +} + +func TestNewSpecialAddressHolderNilBurnAddressShouldErr(t *testing.T) { + t.Parallel() + + args := initDefaultArgs() + args.BurnAddress = nil + addr, err := createSpecialAddressFromArgs(args) + + assert.Nil(t, addr) + assert.Equal(t, data.ErrNilBurnAddress, err) +} + +func TestNewSpecialAddressHolderNilAddressConverterShouldErr(t *testing.T) { + t.Parallel() + + args := initDefaultArgs() + args.AddrConv = nil + addr, err := createSpecialAddressFromArgs(args) + + assert.Nil(t, addr) + assert.Equal(t, data.ErrNilAddressConverter, err) +} + +func TestNewSpecialAddressHolderNilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + args := initDefaultArgs() + args.ShardCoordinator = nil + addr, err := createSpecialAddressFromArgs(args) + + assert.Nil(t, addr) + assert.Equal(t, data.ErrNilShardCoordinator, err) +} + +func TestNewSpecialAddressHolderNilNodesCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + args := initDefaultArgs() + args.NodesCoordiator = nil + addr, err := createSpecialAddressFromArgs(args) + + assert.Nil(t, addr) + assert.Equal(t, data.ErrNilNodesCoordinator, err) +} + +func TestNewSpecialAddressHolderOK(t *testing.T) { + t.Parallel() + + args := initDefaultArgs() + addr, err := createSpecialAddressFromArgs(args) + + assert.NotNil(t, addr) + assert.Nil(t, err) +} + +func TestSpecialAddresses_ClearMetaConsensusDataOK(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + + addr.ClearMetaConsensusData() + metaConsensusData := addr.ConsensusMetaRewardData() + + assert.Equal(t, 0, len(metaConsensusData)) +} + +func TestSpecialAddresses_SetMetaConsensusDataSettingOnceOK(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + + err := addr.SetMetaConsensusData([]byte("randomness"), 0, 0) + assert.Nil(t, err) +} + +func TestSpecialAddresses_SetMetaConsensusDataSettingMultipleOK(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + nConsensuses := 10 + + for i := 0; i < nConsensuses; i++ { + err := addr.SetMetaConsensusData([]byte("randomness"), uint64(i), 0) + assert.Nil(t, err) + } +} + +func TestSpecialAddresses_ConsensusMetaRewardDataNoConsensusData(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + metaConsensusData := 
addr.ConsensusMetaRewardData() + + assert.Equal(t, 0, len(metaConsensusData)) +} + +func TestSpecialAddresses_ConsensusMetaRewardDataOneConsensusDataOK(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + + _ = addr.SetMetaConsensusData([]byte("randomness"), 1, 2) + metaConsensusData := addr.ConsensusMetaRewardData() + + assert.Equal(t, 1, len(metaConsensusData)) + assert.Equal(t, uint64(1), metaConsensusData[0].Round) + assert.Equal(t, uint32(2), metaConsensusData[0].Epoch) +} + +func TestSpecialAddresses_ConsensusMetaRewardDataMultipleConsensusesDataOK(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + nConsensuses := 10 + + for i := 0; i < nConsensuses; i++ { + _ = addr.SetMetaConsensusData([]byte("randomness"), uint64(i+1), uint32(i+2)) + } + + metaConsensusData := addr.ConsensusMetaRewardData() + assert.Equal(t, nConsensuses, len(metaConsensusData)) + + for i := 0; i < nConsensuses; i++ { + assert.Equal(t, uint64(i+1), metaConsensusData[i].Round) + assert.Equal(t, uint32(i+2), metaConsensusData[i].Epoch) + } +} + +func TestSpecialAddresses_ConsensusShardRewardDataNoData(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + shardRewardData := addr.ConsensusShardRewardData() + + assert.Nil(t, shardRewardData) +} + +func TestSpecialAddresses_ConsensusShardRewardDataExistingData(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + _ = addr.SetShardConsensusData([]byte("randomness"), 1, 2, 0) + shardRewardData := addr.ConsensusShardRewardData() + + assert.NotNil(t, shardRewardData) + assert.Equal(t, uint64(1), shardRewardData.Round) + assert.Equal(t, uint32(2), shardRewardData.Epoch) +} + +func TestSpecialAddresses_SetShardConsensusData(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + err := addr.SetShardConsensusData([]byte("randomness"), 1, 2, 0) + + assert.Nil(t, err) +} + +func TestSpecialAddresses_BurnAddress(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + burnAddr := addr.BurnAddress() + + assert.Equal(t, []byte("burn"), burnAddr) +} + +func TestSpecialAddresses_ElrondCommunityAddress(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + communityAddr := addr.ElrondCommunityAddress() + + assert.Equal(t, []byte("community"), communityAddr) +} + +func TestSpecialAddresses_LeaderAddressNoSetShardConsensusData(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + leaderAddress := addr.LeaderAddress() + + assert.Nil(t, leaderAddress) +} + +func TestSpecialAddresses_LeaderAddressSetShardConsensusData(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + _ = addr.SetShardConsensusData([]byte("randomness"), 0, 0, 0) + leaderAddress := addr.LeaderAddress() + + assert.Equal(t, "address00", string(leaderAddress)) +} + +func TestSpecialAddresses_Round(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + _ = addr.SetShardConsensusData([]byte("randomness"), 1, 2, 0) + round := addr.Round() + + assert.Equal(t, uint64(1), round) +} + +func TestSpecialAddresses_Epoch(t *testing.T) { + t.Parallel() + + addr := createDefaultSpecialAddress() + _ = addr.SetShardConsensusData([]byte("randomness"), 1, 2, 0) + epoch := addr.Epoch() + + assert.Equal(t, uint32(2), epoch) +} + +func TestSpecialAddresses_SetElrondCommunityAddress(t *testing.T) { + addr := createDefaultSpecialAddress() + communityAddress := addr.ElrondCommunityAddress() + + assert.Equal(t, []byte("community"), 
communityAddress) +} + +func TestSpecialAddresses_ShardIdForAddress(t *testing.T) { + args := initDefaultArgs() + args.ShardCoordinator = &mock.MultipleShardsCoordinatorMock{ + NoShards: 4, + ComputeIdCalled: func(address state.AddressContainer) uint32 { + return uint32(address.Bytes()[0]) + }, + CurrentShard: 0, + } + addr, _ := createSpecialAddressFromArgs(args) + shardId, err := addr.ShardIdForAddress([]byte{3}) + + assert.Nil(t, err) + assert.Equal(t, uint32(3), shardId) +} + +func TestSpecialAddresses_IsInterfaceNil(t *testing.T) { + addr := &specialAddresses{} + + addr = nil + isNil := addr.IsInterfaceNil() + + assert.True(t, isNil) +} diff --git a/data/block/block.go b/data/block/block.go index 9b1ad8403a0..123822c2e22 100644 --- a/data/block/block.go +++ b/data/block/block.go @@ -34,8 +34,10 @@ const ( PeerBlock Type = 2 // SmartContractResultBlock identifies a miniblock holding smartcontractresults SmartContractResultBlock Type = 3 + // RewardsBlock identifies a miniblock holding accumulated rewards, both system generated and from tx fees + RewardsBlock Type = 4 // InvalidBlock identifies identifies an invalid miniblock - InvalidBlock Type = 4 + InvalidBlock Type = 5 ) // String returns the string representation of the Type @@ -49,6 +51,8 @@ func (bType Type) String() string { return "PeerBody" case SmartContractResultBlock: return "SmartContractResultBody" + case RewardsBlock: + return "RewardsBody" case InvalidBlock: return "InvalidBlock" default: @@ -99,7 +103,6 @@ type Header struct { RootHash []byte `capid:"13"` MetaBlockHashes [][]byte `capid:"14"` TxCount uint32 `capid:"15"` - processedMBs map[string]bool } // Save saves the serialized data of a Block Header into a stream through Capnp protocol @@ -490,22 +493,6 @@ func (h *Header) MapMiniBlockHashesToShards() map[string]uint32 { return hashDst } -// GetMiniBlockProcessed verifies if miniblock from header was processed -func (h *Header) GetMiniBlockProcessed(hash []byte) bool { - if h.processedMBs == nil { - h.processedMBs = make(map[string]bool, 0) - } - return h.processedMBs[string(hash)] -} - -// SetMiniBlockProcessed set that miniblock with hash to processed or not processed -func (h *Header) SetMiniBlockProcessed(hash []byte, processed bool) { - if h.processedMBs == nil { - h.processedMBs = make(map[string]bool, 0) - } - h.processedMBs[string(hash)] = processed -} - // IntegrityAndValidity checks if data is valid func (b Body) IntegrityAndValidity() error { if b == nil || b.IsInterfaceNil() { diff --git a/data/block/metaBlock.go b/data/block/metaBlock.go index 80fb0afb2ac..8681272d9df 100644 --- a/data/block/metaBlock.go +++ b/data/block/metaBlock.go @@ -71,7 +71,6 @@ type MetaBlock struct { RandSeed []byte `capid:"10"` RootHash []byte `capid:"11"` TxCount uint32 `capid:"12"` - processedMBs map[string]bool } // MetaBlockBody hold the data for metablock body @@ -435,25 +434,6 @@ func (m *MetaBlock) GetMiniBlockHeadersWithDst(destId uint32) map[string]uint32 return hashDst } -// GetMiniBlockProcessed verifies if miniblock from header was processed -func (m *MetaBlock) GetMiniBlockProcessed(hash []byte) bool { - if m.processedMBs == nil { - m.processedMBs = make(map[string]bool, 0) - } - if m.processedMBs[string(hash)] { - return true - } - return false -} - -// SetMiniBlockProcessed set that miniblock with hash to processed or not processed -func (m *MetaBlock) SetMiniBlockProcessed(hash []byte, processed bool) { - if m.processedMBs == nil { - m.processedMBs = make(map[string]bool, 0) - } - m.processedMBs[string(hash)] = 
processed -} - // IntegrityAndValidity return true as block is nil for metablock. func (m *MetaBlockBody) IntegrityAndValidity() error { return nil diff --git a/data/CapnpHelper.go b/data/capnpHelper.go similarity index 100% rename from data/CapnpHelper.go rename to data/capnpHelper.go diff --git a/data/consensusRewardData.go b/data/consensusRewardData.go new file mode 100644 index 00000000000..731838d5322 --- /dev/null +++ b/data/consensusRewardData.go @@ -0,0 +1,8 @@ +package data + +// ConsensusRewardData holds the required data for rewarding validators in a specific round and epoch +type ConsensusRewardData struct { + Round uint64 + Epoch uint32 + Addresses []string +} diff --git a/data/errors.go b/data/errors.go index 200f5c2d76e..57509288aeb 100644 --- a/data/errors.go +++ b/data/errors.go @@ -27,3 +27,18 @@ var ErrMiniBlockEmpty = errors.New("mini block is empty") // ErrWrongTypeAssertion signals that wrong type was provided var ErrWrongTypeAssertion = errors.New("wrong type assertion") + +// ErrNilElrondAddress signals that nil elrond address was provided +var ErrNilElrondAddress = errors.New("nil elrond address") + +// ErrNilBurnAddress signals that nil burn address was provided +var ErrNilBurnAddress = errors.New("nil burn address") + +// ErrNilAddressConverter signals that nil address converter was provided +var ErrNilAddressConverter = errors.New("nil address converter") + +// ErrNilShardCoordinator signals that nil shard coordinator was provided +var ErrNilShardCoordinator = errors.New("nil shard coordinator") + +// ErrNilNodesCoordinator signals that nil shard coordinator was provided +var ErrNilNodesCoordinator = errors.New("nil nodes coordinator") diff --git a/data/interface.go b/data/interface.go index 322c1170abe..8cdc27729ff 100644 --- a/data/interface.go +++ b/data/interface.go @@ -32,8 +32,6 @@ type HeaderHandler interface { SetTxCount(txCount uint32) GetMiniBlockHeadersWithDst(destId uint32) map[string]uint32 - GetMiniBlockProcessed(hash []byte) bool - SetMiniBlockProcessed(hash []byte, processed bool) IsInterfaceNil() bool ItemsInBody() uint32 diff --git a/data/mock/addressConverterMock.go b/data/mock/addressConverterMock.go new file mode 100644 index 00000000000..de5572b249e --- /dev/null +++ b/data/mock/addressConverterMock.go @@ -0,0 +1,66 @@ +package mock + +import ( + "bytes" + "encoding/hex" + "errors" + + "github.com/ElrondNetwork/elrond-go/data/state" +) + +var errFailure = errors.New("failure") + +type AddressConverterMock struct { + Fail bool + CreateAddressFromPublicKeyBytesRetErrForValue []byte +} + +func (acm *AddressConverterMock) CreateAddressFromPublicKeyBytes(pubKey []byte) (state.AddressContainer, error) { + if acm.Fail { + return nil, errFailure + } + + if acm.CreateAddressFromPublicKeyBytesRetErrForValue != nil { + if bytes.Equal(acm.CreateAddressFromPublicKeyBytesRetErrForValue, pubKey) { + return nil, errors.New("error required") + } + } + + return NewAddressMockFromBytes(pubKey), nil +} + +func (acm *AddressConverterMock) ConvertToHex(addressContainer state.AddressContainer) (string, error) { + if acm.Fail { + return "", errFailure + } + + return hex.EncodeToString(addressContainer.Bytes()), nil +} + +func (acm *AddressConverterMock) CreateAddressFromHex(hexAddress string) (state.AddressContainer, error) { + if acm.Fail { + return nil, errFailure + } + + panic("implement me") +} + +func (acm *AddressConverterMock) PrepareAddressBytes(addressBytes []byte) ([]byte, error) { + if acm.Fail { + return nil, errFailure + } + + panic("implement me") +} 
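// Illustrative usage of AddressConverterMock in a test (a hypothetical sketch based only on the
// mock's own fields above, not taken from an existing test): one specific public key can be
// forced to fail while every other key is wrapped into an address container, e.g.:
//
//	conv := &AddressConverterMock{CreateAddressFromPublicKeyBytesRetErrForValue: []byte("bad")}
//	_, err := conv.CreateAddressFromPublicKeyBytes([]byte("bad"))  // returns an error
//	adr, _ := conv.CreateAddressFromPublicKeyBytes([]byte("good")) // wraps the raw bytes
//	_, _ = adr, err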
+ +func (acm *AddressConverterMock) AddressLen() int { + return 32 +} + +// IsInterfaceNil returns true if there is no value under the interface +func (acm *AddressConverterMock) IsInterfaceNil() bool { + if acm == nil { + return true + } + return false +} diff --git a/data/mock/multipleShardsCoordinatorMock.go b/data/mock/multipleShardsCoordinatorMock.go new file mode 100644 index 00000000000..38a5ab1814e --- /dev/null +++ b/data/mock/multipleShardsCoordinatorMock.go @@ -0,0 +1,70 @@ +package mock + +import ( + "fmt" + + "github.com/ElrondNetwork/elrond-go/data/state" +) + +type MultipleShardsCoordinatorMock struct { + NoShards uint32 + ComputeIdCalled func(address state.AddressContainer) uint32 + CurrentShard uint32 +} + +func NewMultipleShardsCoordinatorMock() *MultipleShardsCoordinatorMock { + return &MultipleShardsCoordinatorMock{NoShards: 1} +} + +func NewMultiShardsCoordinatorMock(nrShard uint32) *MultipleShardsCoordinatorMock { + return &MultipleShardsCoordinatorMock{NoShards: nrShard} +} + +func (scm *MultipleShardsCoordinatorMock) NumberOfShards() uint32 { + return scm.NoShards +} + +func (scm *MultipleShardsCoordinatorMock) ComputeId(address state.AddressContainer) uint32 { + if scm.ComputeIdCalled == nil { + return scm.SelfId() + } + return scm.ComputeIdCalled(address) +} + +func (scm *MultipleShardsCoordinatorMock) SelfId() uint32 { + return scm.CurrentShard +} + +func (scm *MultipleShardsCoordinatorMock) SetSelfId(shardId uint32) error { + return nil +} + +func (scm *MultipleShardsCoordinatorMock) SameShard(firstAddress, secondAddress state.AddressContainer) bool { + return true +} + +func (scm *MultipleShardsCoordinatorMock) SetNoShards(noShards uint32) { + scm.NoShards = noShards +} + +// CommunicationIdentifier returns the identifier between current shard ID and destination shard ID +// identifier is generated such as the first shard from identifier is always smaller than the last +func (scm *MultipleShardsCoordinatorMock) CommunicationIdentifier(destShardID uint32) string { + if destShardID == scm.CurrentShard { + return fmt.Sprintf("_%d", scm.CurrentShard) + } + + if destShardID < scm.CurrentShard { + return fmt.Sprintf("_%d_%d", destShardID, scm.CurrentShard) + } + + return fmt.Sprintf("_%d_%d", scm.CurrentShard, destShardID) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (scm *MultipleShardsCoordinatorMock) IsInterfaceNil() bool { + if scm == nil { + return true + } + return false +} diff --git a/data/mock/nodesCoordinatorMock.go b/data/mock/nodesCoordinatorMock.go new file mode 100644 index 00000000000..c5f14b54528 --- /dev/null +++ b/data/mock/nodesCoordinatorMock.go @@ -0,0 +1,199 @@ +package mock + +import ( + "bytes" + "fmt" + "math/big" + + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// NodesCoordinator defines the behaviour of a struct able to do validator group selection +type NodesCoordinatorMock struct { + Validators map[uint32][]sharding.Validator + ShardConsensusSize uint32 + MetaConsensusSize uint32 + ShardId uint32 + NbShards uint32 + GetSelectedPublicKeysCalled func(selection []byte, shardId uint32) (publicKeys []string, err error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + LoadNodesPerShardsCalled func(nodes map[uint32][]sharding.Validator) error + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) 
(validatorsGroup []sharding.Validator, err error) + GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator sharding.Validator, shardId uint32, err error) +} + +func NewNodesCoordinatorMock() *NodesCoordinatorMock { + nbShards := uint32(1) + nodesPerShard := 2 + validatorsMap := make(map[uint32][]sharding.Validator) + + shards := make([]uint32, nbShards+1) + for i := uint32(0); i < nbShards; i++ { + shards[i] = i + } + shards[nbShards] = sharding.MetachainShardId + + for _, sh := range shards { + validatorsList := make([]sharding.Validator, nodesPerShard) + for v := 0; v < nodesPerShard; v++ { + validatorsList[v], _ = sharding.NewValidator( + big.NewInt(10), + 1, + []byte(fmt.Sprintf("pubKey%d%d", sh, v)), + []byte(fmt.Sprintf("address%d%d", sh, v)), + ) + } + validatorsMap[sh] = validatorsList + } + + return &NodesCoordinatorMock{ + ShardConsensusSize: 1, + MetaConsensusSize: 1, + ShardId: 0, + NbShards: nbShards, + Validators: validatorsMap, + } +} + +func (ncm *NodesCoordinatorMock) GetValidatorsIndexes(publicKeys []string) []uint64 { + return nil +} + +func (ncm *NodesCoordinatorMock) GetAllValidatorsPublicKeys() map[uint32][][]byte { + return nil +} + +func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { + if ncm.GetSelectedPublicKeysCalled != nil { + return ncm.GetSelectedPublicKeysCalled(selection, shardId) + } + + if len(ncm.Validators) == 0 { + return nil, sharding.ErrNilInputNodesMap + } + + pubKeys := make([]string, 0) + + for _, v := range ncm.Validators[shardId] { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + valGrStr := make([]string, 0) + + for _, v := range validators { + valGrStr = append(valGrStr, string(v.PubKey())) + } + + return valGrStr, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + addresses := make([]string, 0) + for _, v := range validators { + addresses = append(addresses, string(v.Address())) + } + + return addresses, nil +} + +func (ncm *NodesCoordinatorMock) SetNodesPerShards(nodes map[uint32][]sharding.Validator) error { + if ncm.LoadNodesPerShardsCalled != nil { + return ncm.LoadNodesPerShardsCalled(nodes) + } + + if nodes == nil { + return sharding.ErrNilInputNodesMap + } + + ncm.Validators = nodes + + return nil +} + +func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( + randomess []byte, + round uint64, + shardId uint32, +) ([]sharding.Validator, error) { + var consensusSize uint32 + + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomess, round, shardId) + } + + if ncm.ShardId == sharding.MetachainShardId { + consensusSize = ncm.MetaConsensusSize + } else { + consensusSize = ncm.ShardConsensusSize + } + + if randomess == nil { + return nil, sharding.ErrNilRandomness + } 
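	// Descriptive note: this mock performs no randomness-based selection; it simply returns the
	// first consensusSize validators registered for shardId, in the order NewNodesCoordinatorMock
	// created them.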
+ + validatorsGroup := make([]sharding.Validator, 0) + + for i := uint32(0); i < consensusSize; i++ { + validatorsGroup = append(validatorsGroup, ncm.Validators[shardId][i]) + } + + return validatorsGroup, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { + if ncm.GetValidatorWithPublicKeyCalled != nil { + return ncm.GetValidatorWithPublicKeyCalled(publicKey) + } + + if publicKey == nil { + return nil, 0, sharding.ErrNilPubKey + } + + for shardId, shardEligible := range ncm.Validators { + for i := 0; i < len(shardEligible); i++ { + if bytes.Equal(publicKey, shardEligible[i].PubKey()) { + return shardEligible[i], shardId, nil + } + } + } + + return nil, 0, sharding.ErrValidatorNotFound +} + +func (ncm *NodesCoordinatorMock) IsInterfaceNil() bool { + if ncm == nil { + return true + } + return false +} diff --git a/data/mock/storerStub.go b/data/mock/storerStub.go index d189606d753..af7d1b3ee16 100644 --- a/data/mock/storerStub.go +++ b/data/mock/storerStub.go @@ -4,7 +4,6 @@ type StorerStub struct { PutCalled func(key, data []byte) error GetCalled func(key []byte) ([]byte, error) HasCalled func(key []byte) error - HasOrAddCalled func(key []byte, value []byte) error RemoveCalled func(key []byte) error ClearCacheCalled func() DestroyUnitCalled func() error @@ -22,10 +21,6 @@ func (ss *StorerStub) Has(key []byte) error { return ss.HasCalled(key) } -func (ss *StorerStub) HasOrAdd(key []byte, value []byte) error { - return ss.HasOrAddCalled(key, value) -} - func (ss *StorerStub) Remove(key []byte) error { return ss.RemoveCalled(key) } diff --git a/data/mock/txTypeHandlerMock.go b/data/mock/txTypeHandlerMock.go new file mode 100644 index 00000000000..2fcaeaf25d3 --- /dev/null +++ b/data/mock/txTypeHandlerMock.go @@ -0,0 +1,18 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/process" +) + +type TxTypeHandlerMock struct { + ComputeTransactionTypeCalled func(tx data.TransactionHandler) (process.TransactionType, error) +} + +func (th *TxTypeHandlerMock) ComputeTransactionType(tx data.TransactionHandler) (process.TransactionType, error) { + if th.ComputeTransactionTypeCalled == nil { + return process.MoveBalance, nil + } + + return th.ComputeTransactionTypeCalled(tx) +} diff --git a/data/mock/unsignedTxHandlerMock.go b/data/mock/unsignedTxHandlerMock.go new file mode 100644 index 00000000000..7097c4a31e8 --- /dev/null +++ b/data/mock/unsignedTxHandlerMock.go @@ -0,0 +1,53 @@ +package mock + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/data" +) + +type UnsignedTxHandlerMock struct { + CleanProcessedUtxsCalled func() + ProcessTransactionFeeCalled func(cost *big.Int) + CreateAllUTxsCalled func() []data.TransactionHandler + VerifyCreatedUTxsCalled func() error + AddTxFeeFromBlockCalled func(tx data.TransactionHandler) +} + +func (ut *UnsignedTxHandlerMock) AddRewardTxFromBlock(tx data.TransactionHandler) { + if ut.AddTxFeeFromBlockCalled == nil { + return + } + + ut.AddTxFeeFromBlockCalled(tx) +} + +func (ut *UnsignedTxHandlerMock) CleanProcessedUTxs() { + if ut.CleanProcessedUtxsCalled == nil { + return + } + + ut.CleanProcessedUtxsCalled() +} + +func (ut *UnsignedTxHandlerMock) ProcessTransactionFee(cost *big.Int) { + if ut.ProcessTransactionFeeCalled == nil { + return + } + + ut.ProcessTransactionFeeCalled(cost) +} + +func (ut *UnsignedTxHandlerMock) CreateAllUTxs() []data.TransactionHandler { + if ut.CreateAllUTxsCalled == nil { + return nil + } + 
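	// Delegate to the test-supplied callback (the nil-callback case above has already returned).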
return ut.CreateAllUTxsCalled() +} + +func (ut *UnsignedTxHandlerMock) VerifyCreatedUTxs() error { + if ut.VerifyCreatedUTxsCalled == nil { + return nil + } + return ut.VerifyCreatedUTxsCalled() +} diff --git a/data/rewardTx/capnp/schema.capnp b/data/rewardTx/capnp/schema.capnp new file mode 100644 index 00000000000..8b963360616 --- /dev/null +++ b/data/rewardTx/capnp/schema.capnp @@ -0,0 +1,19 @@ +@0xa6e50837d4563fc2; +using Go = import "/go.capnp"; +$Go.package("capnp"); +$Go.import("_"); + +struct RewardTxCapn { + round @0: UInt64; + epoch @1: UInt32; + value @2: Data; + rcvAddr @3: Data; + shardId @4: UInt32; +} + +##compile with: + +## +## +## capnpc -I$GOPATH/src/github.com/glycerine/go-capnproto -ogo $GOPATH/src/github.com/ElrondNetwork/elrond-go/data/rewardTx/capnp/schema.capnp + diff --git a/data/rewardTx/capnp/schema.capnp.go b/data/rewardTx/capnp/schema.capnp.go new file mode 100644 index 00000000000..f9e5247b348 --- /dev/null +++ b/data/rewardTx/capnp/schema.capnp.go @@ -0,0 +1,271 @@ +package capnp + +// AUTO GENERATED - DO NOT EDIT + +import ( + "bufio" + "bytes" + "encoding/json" + C "github.com/glycerine/go-capnproto" + "io" +) + +type RewardTxCapn C.Struct + +func NewRewardTxCapn(s *C.Segment) RewardTxCapn { return RewardTxCapn(s.NewStruct(16, 2)) } +func NewRootRewardTxCapn(s *C.Segment) RewardTxCapn { return RewardTxCapn(s.NewRootStruct(16, 2)) } +func AutoNewRewardTxCapn(s *C.Segment) RewardTxCapn { return RewardTxCapn(s.NewStructAR(16, 2)) } +func ReadRootRewardTxCapn(s *C.Segment) RewardTxCapn { return RewardTxCapn(s.Root(0).ToStruct()) } +func (s RewardTxCapn) Round() uint64 { return C.Struct(s).Get64(0) } +func (s RewardTxCapn) SetRound(v uint64) { C.Struct(s).Set64(0, v) } +func (s RewardTxCapn) Epoch() uint32 { return C.Struct(s).Get32(8) } +func (s RewardTxCapn) SetEpoch(v uint32) { C.Struct(s).Set32(8, v) } +func (s RewardTxCapn) Value() []byte { return C.Struct(s).GetObject(0).ToData() } +func (s RewardTxCapn) SetValue(v []byte) { C.Struct(s).SetObject(0, s.Segment.NewData(v)) } +func (s RewardTxCapn) RcvAddr() []byte { return C.Struct(s).GetObject(1).ToData() } +func (s RewardTxCapn) SetRcvAddr(v []byte) { C.Struct(s).SetObject(1, s.Segment.NewData(v)) } +func (s RewardTxCapn) ShardId() uint32 { return C.Struct(s).Get32(12) } +func (s RewardTxCapn) SetShardId(v uint32) { C.Struct(s).Set32(12, v) } +func (s RewardTxCapn) WriteJSON(w io.Writer) error { + b := bufio.NewWriter(w) + var err error + var buf []byte + _ = buf + err = b.WriteByte('{') + if err != nil { + return err + } + _, err = b.WriteString("\"round\":") + if err != nil { + return err + } + { + s := s.Round() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"epoch\":") + if err != nil { + return err + } + { + s := s.Epoch() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"value\":") + if err != nil { + return err + } + { + s := s.Value() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"rcvAddr\":") + if err != nil { + return err + } + { + s := s.RcvAddr() + buf, err = json.Marshal(s) + if err != nil { + return 
err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(',') + if err != nil { + return err + } + _, err = b.WriteString("\"shardId\":") + if err != nil { + return err + } + { + s := s.ShardId() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte('}') + if err != nil { + return err + } + err = b.Flush() + return err +} +func (s RewardTxCapn) MarshalJSON() ([]byte, error) { + b := bytes.Buffer{} + err := s.WriteJSON(&b) + return b.Bytes(), err +} +func (s RewardTxCapn) WriteCapLit(w io.Writer) error { + b := bufio.NewWriter(w) + var err error + var buf []byte + _ = buf + err = b.WriteByte('(') + if err != nil { + return err + } + _, err = b.WriteString("round = ") + if err != nil { + return err + } + { + s := s.Round() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("epoch = ") + if err != nil { + return err + } + { + s := s.Epoch() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("value = ") + if err != nil { + return err + } + { + s := s.Value() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("rcvAddr = ") + if err != nil { + return err + } + { + s := s.RcvAddr() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + _, err = b.WriteString(", ") + if err != nil { + return err + } + _, err = b.WriteString("shardId = ") + if err != nil { + return err + } + { + s := s.ShardId() + buf, err = json.Marshal(s) + if err != nil { + return err + } + _, err = b.Write(buf) + if err != nil { + return err + } + } + err = b.WriteByte(')') + if err != nil { + return err + } + err = b.Flush() + return err +} +func (s RewardTxCapn) MarshalCapLit() ([]byte, error) { + b := bytes.Buffer{} + err := s.WriteCapLit(&b) + return b.Bytes(), err +} + +type RewardTxCapn_List C.PointerList + +func NewRewardTxCapnList(s *C.Segment, sz int) RewardTxCapn_List { + return RewardTxCapn_List(s.NewCompositeList(16, 2, sz)) +} +func (s RewardTxCapn_List) Len() int { return C.PointerList(s).Len() } +func (s RewardTxCapn_List) At(i int) RewardTxCapn { + return RewardTxCapn(C.PointerList(s).At(i).ToStruct()) +} +func (s RewardTxCapn_List) ToArray() []RewardTxCapn { + n := s.Len() + a := make([]RewardTxCapn, n) + for i := 0; i < n; i++ { + a[i] = s.At(i) + } + return a +} +func (s RewardTxCapn_List) Set(i int, item RewardTxCapn) { C.PointerList(s).Set(i, C.Object(item)) } diff --git a/data/rewardTx/rewardTx.go b/data/rewardTx/rewardTx.go new file mode 100644 index 00000000000..0bee4c200c1 --- /dev/null +++ b/data/rewardTx/rewardTx.go @@ -0,0 +1,119 @@ +package rewardTx + +import ( + "io" + "math/big" + + "github.com/ElrondNetwork/elrond-go/data/rewardTx/capnp" + "github.com/glycerine/go-capnproto" +) + +// RewardTx holds the data for a reward transaction +type RewardTx struct { + Round uint64 `capid:"1" json:"round"` + Epoch uint32 `capid:"2" json:"epoch"` + Value *big.Int `capid:"3" json:"value"` + RcvAddr []byte `capid:"4" 
json:"receiver"` + ShardId uint32 `capid:"5" json:"shardId"` +} + +// Save saves the serialized data of a RewardTx into a stream through Capnp protocol +func (scr *RewardTx) Save(w io.Writer) error { + seg := capn.NewBuffer(nil) + RewardTxGoToCapn(seg, scr) + _, err := seg.WriteTo(w) + return err +} + +// Load loads the data from the stream into a RewardTx object through Capnp protocol +func (scr *RewardTx) Load(r io.Reader) error { + capMsg, err := capn.ReadFromStream(r, nil) + if err != nil { + return err + } + + z := capnp.ReadRootRewardTxCapn(capMsg) + RewardTxCapnToGo(z, scr) + return nil +} + +// RewardTxCapnToGo is a helper function to copy fields from a RewardTxCapn object to a RewardTx object +func RewardTxCapnToGo(src capnp.RewardTxCapn, dest *RewardTx) *RewardTx { + if dest == nil { + dest = &RewardTx{} + } + + if dest.Value == nil { + dest.Value = big.NewInt(0) + } + + dest.Epoch = src.Epoch() + dest.Round = src.Round() + err := dest.Value.GobDecode(src.Value()) + + if err != nil { + return nil + } + + dest.RcvAddr = src.RcvAddr() + dest.ShardId = src.ShardId() + + return dest +} + +// RewardTxGoToCapn is a helper function to copy fields from a RewardTx object to a RewardTxCapn object +func RewardTxGoToCapn(seg *capn.Segment, src *RewardTx) capnp.RewardTxCapn { + dest := capnp.AutoNewRewardTxCapn(seg) + + value, _ := src.Value.GobEncode() + dest.SetEpoch(src.Epoch) + dest.SetRound(src.Round) + dest.SetValue(value) + dest.SetRcvAddr(src.RcvAddr) + dest.SetShardId(src.ShardId) + + return dest +} + +// IsInterfaceNil verifies if underlying object is nil +func (scr *RewardTx) IsInterfaceNil() bool { + return scr == nil +} + +// GetValue returns the value of the reward transaction +func (scr *RewardTx) GetValue() *big.Int { + return scr.Value +} + +// GetData returns the data of the reward transaction +func (scr *RewardTx) GetData() string { + return "" +} + +// GetRecvAddress returns the receiver address from the reward transaction +func (scr *RewardTx) GetRecvAddress() []byte { + return scr.RcvAddr +} + +// GetSndAddress returns the sender address from the reward transaction +func (scr *RewardTx) GetSndAddress() []byte { + return nil +} + +// SetValue sets the value of the reward transaction +func (scr *RewardTx) SetValue(value *big.Int) { + scr.Value = value +} + +// SetData sets the data of the reward transaction +func (scr *RewardTx) SetData(data string) { +} + +// SetRecvAddress sets the receiver address of the reward transaction +func (scr *RewardTx) SetRecvAddress(addr []byte) { + scr.RcvAddr = addr +} + +// SetSndAddress sets the sender address of the reward transaction +func (scr *RewardTx) SetSndAddress(addr []byte) { +} diff --git a/data/rewardTx/rewardTx_test.go b/data/rewardTx/rewardTx_test.go new file mode 100644 index 00000000000..80930abac26 --- /dev/null +++ b/data/rewardTx/rewardTx_test.go @@ -0,0 +1,68 @@ +package rewardTx_test + +import ( + "bytes" + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/stretchr/testify/assert" +) + +func TestRewardTx_SaveLoad(t *testing.T) { + smrS := rewardTx.RewardTx{ + Round: uint64(1), + Epoch: uint32(1), + Value: big.NewInt(1), + RcvAddr: []byte("receiver_address"), + ShardId: 10, + } + + var b bytes.Buffer + err := smrS.Save(&b) + assert.Nil(t, err) + + loadSMR := rewardTx.RewardTx{} + err = loadSMR.Load(&b) + assert.Nil(t, err) + + assert.Equal(t, smrS, loadSMR) +} + +func TestRewardTx_GetRecvAddr(t *testing.T) { + t.Parallel() + + data := []byte("data") + scr := 
&rewardTx.RewardTx{RcvAddr: data} + + assert.Equal(t, data, scr.RcvAddr) +} + +func TestRewardTx_GetValue(t *testing.T) { + t.Parallel() + + value := big.NewInt(10) + scr := &rewardTx.RewardTx{Value: value} + + assert.Equal(t, value, scr.Value) +} + +func TestRewardTx_SetRecvAddr(t *testing.T) { + t.Parallel() + + data := []byte("data") + scr := &rewardTx.RewardTx{} + scr.SetRecvAddress(data) + + assert.Equal(t, data, scr.RcvAddr) +} + +func TestRewardTx_SetValue(t *testing.T) { + t.Parallel() + + value := big.NewInt(10) + scr := &rewardTx.RewardTx{} + scr.SetValue(value) + + assert.Equal(t, value, scr.Value) +} diff --git a/data/state/errors.go b/data/state/errors.go index c801162cc21..2d7196f5353 100644 --- a/data/state/errors.go +++ b/data/state/errors.go @@ -130,3 +130,15 @@ var ErrBech32ConvertError = errors.New("can't convert bech32 string") // ErrBech32WrongAddr signals that the string provided might not be in bech32 format var ErrBech32WrongAddr = errors.New("wrong bech32 string") + +// ErrNilStake signals that the provided stake is nil +var ErrNilStake = errors.New("stake is nil") + +// ErrNilSchnorrPublicKey signals that the provided schnorr public is nil +var ErrNilSchnorrPublicKey = errors.New("schnorr public key is nil") + +// ErrNilBLSPublicKey signals that the provided BLS public key is nil +var ErrNilBLSPublicKey = errors.New("bls public key is nil") + +// ErrUnknownAccountType signals that the provided account type is unknown +var ErrUnknownAccountType = errors.New("account type is unknown") diff --git a/data/state/factory/accountCreatorFactory.go b/data/state/factory/accountCreatorFactory.go index 8aa90bc777f..70e297b53b0 100644 --- a/data/state/factory/accountCreatorFactory.go +++ b/data/state/factory/accountCreatorFactory.go @@ -2,22 +2,30 @@ package factory import ( "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/sharding" ) -// NewAccountFactoryCreator returns an account factory depending on shard coordinator self id -func NewAccountFactoryCreator(coordinator sharding.Coordinator) (state.AccountFactory, error) { - if coordinator == nil { - return nil, state.ErrNilShardCoordinator - } +// Type defines account types to save in accounts trie +type Type uint8 - if coordinator.SelfId() < coordinator.NumberOfShards() { - return NewAccountCreator(), nil - } +const ( + // UserAccount identifies an account holding balance, storage updates, code + UserAccount Type = 0 + // ShardStatistics identifies a shard, keeps the statistics + ShardStatistics Type = 1 + // ValidatorAccount identifies an account holding stake, crypto public keys, assigned shard, rating + ValidatorAccount Type = 2 +) - if coordinator.SelfId() == sharding.MetachainShardId { +// NewAccountFactoryCreator returns an account factory depending on shard coordinator self id +func NewAccountFactoryCreator(accountType Type) (state.AccountFactory, error) { + switch accountType { + case UserAccount: + return NewAccountCreator(), nil + case ShardStatistics: return NewMetaAccountCreator(), nil + case ValidatorAccount: + return NewPeerAccountCreator(), nil + default: + return nil, state.ErrUnknownAccountType } - - return nil, state.ErrUnknownShardId } diff --git a/data/state/factory/accountCreatorFactory_test.go b/data/state/factory/accountCreatorFactory_test.go index f0c77ea8b05..d2852ff5a7c 100644 --- a/data/state/factory/accountCreatorFactory_test.go +++ b/data/state/factory/accountCreatorFactory_test.go @@ -6,63 +6,55 @@ import ( "github.com/ElrondNetwork/elrond-go/data/mock" 
"github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/state/factory" - "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) -func TestNewAccountFactoryCreator_NilShardCoordinator(t *testing.T) { +func TestNewAccountFactoryCreator_NormalAccount(t *testing.T) { t.Parallel() - accF, err := factory.NewAccountFactoryCreator(nil) + accF, err := factory.NewAccountFactoryCreator(factory.UserAccount) + assert.Nil(t, err) - assert.Equal(t, err, state.ErrNilShardCoordinator) - assert.Nil(t, accF) + accWrp, err := accF.CreateAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + _, ok := accWrp.(*state.Account) + assert.Equal(t, true, ok) + + assert.Nil(t, err) + assert.NotNil(t, accF) } -func TestNewAccountFactoryCreator_NormalAccount(t *testing.T) { +func TestNewAccountFactoryCreator_MetaAccount(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: 0, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.ShardStatistics) assert.Nil(t, err) accWrp, err := accF.CreateAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) - _, ok := accWrp.(*state.Account) + _, ok := accWrp.(*state.MetaAccount) assert.Equal(t, true, ok) assert.Nil(t, err) assert.NotNil(t, accF) } -func TestNewAccountFactoryCreator_MetaAccount(t *testing.T) { +func TestNewAccountFactoryCreator_PeerAccount(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: sharding.MetachainShardId, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.ValidatorAccount) assert.Nil(t, err) accWrp, err := accF.CreateAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) - _, ok := accWrp.(*state.MetaAccount) + _, ok := accWrp.(*state.PeerAccount) assert.Equal(t, true, ok) assert.Nil(t, err) assert.NotNil(t, accF) } -func TestNewAccountFactoryCreator_BadShardID(t *testing.T) { +func TestNewAccountFactoryCreator_UnknownType(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: 10, - NrOfShards: 5, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(10) assert.Nil(t, accF) - assert.Equal(t, state.ErrUnknownShardId, err) + assert.Equal(t, state.ErrUnknownAccountType, err) } diff --git a/data/state/factory/accountCreator_test.go b/data/state/factory/accountCreator_test.go index cf63e6219a0..1ffc6d27a7e 100644 --- a/data/state/factory/accountCreator_test.go +++ b/data/state/factory/accountCreator_test.go @@ -12,11 +12,7 @@ import ( func TestAccountCreator_CreateAccountNilAddress(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: 0, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.UserAccount) assert.Nil(t, err) _, ok := accF.(*factory.AccountCreator) @@ -31,11 +27,7 @@ func TestAccountCreator_CreateAccountNilAddress(t *testing.T) { func TestAccountCreator_CreateAccountNilAccountTraccer(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: 0, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.UserAccount) assert.Nil(t, err) _, ok := accF.(*factory.AccountCreator) @@ -50,11 +42,7 @@ func TestAccountCreator_CreateAccountNilAccountTraccer(t *testing.T) { func 
TestAccountCreator_CreateAccountOk(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: 0, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.UserAccount) assert.Nil(t, err) _, ok := accF.(*factory.AccountCreator) diff --git a/data/state/factory/metaAccountCreator_test.go b/data/state/factory/metaAccountCreator_test.go index 836c3d88ef8..326ba3e719c 100644 --- a/data/state/factory/metaAccountCreator_test.go +++ b/data/state/factory/metaAccountCreator_test.go @@ -6,18 +6,13 @@ import ( "github.com/ElrondNetwork/elrond-go/data/mock" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/state/factory" - "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) func TestMetaAccountCreator_CreateAccountNilAddress(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: sharding.MetachainShardId, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.ShardStatistics) assert.Nil(t, err) _, ok := accF.(*factory.MetaAccountCreator) @@ -32,11 +27,7 @@ func TestMetaAccountCreator_CreateAccountNilAddress(t *testing.T) { func TestMetaAccountCreator_CreateAccountNilAccountTraccer(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: sharding.MetachainShardId, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.ShardStatistics) assert.Nil(t, err) _, ok := accF.(*factory.MetaAccountCreator) @@ -51,11 +42,7 @@ func TestMetaAccountCreator_CreateAccountNilAccountTraccer(t *testing.T) { func TestMetaAccountCreator_CreateAccountOk(t *testing.T) { t.Parallel() - shardC := &mock.ShardCoordinatorMock{ - SelfID: sharding.MetachainShardId, - NrOfShards: 1, - } - accF, err := factory.NewAccountFactoryCreator(shardC) + accF, err := factory.NewAccountFactoryCreator(factory.ShardStatistics) assert.Nil(t, err) _, ok := accF.(*factory.MetaAccountCreator) diff --git a/data/state/factory/peerAccountCreator.go b/data/state/factory/peerAccountCreator.go new file mode 100644 index 00000000000..a1edba4e880 --- /dev/null +++ b/data/state/factory/peerAccountCreator.go @@ -0,0 +1,30 @@ +package factory + +import "github.com/ElrondNetwork/elrond-go/data/state" + +// PeerAccountCreator has a method to create a new peer account +type PeerAccountCreator struct { +} + +// NewPeerAccountCreator creates a peer account creator +func NewPeerAccountCreator() state.AccountFactory { + return &PeerAccountCreator{} +} + +// CreateAccount calls the new Account creator and returns the result +func (c *PeerAccountCreator) CreateAccount(address state.AddressContainer, tracker state.AccountTracker) (state.AccountHandler, error) { + account, err := state.NewPeerAccount(address, tracker) + if err != nil { + return nil, err + } + + return account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (c *PeerAccountCreator) IsInterfaceNil() bool { + if c == nil { + return true + } + return false +} diff --git a/data/state/factory/peerAccountCreator_test.go b/data/state/factory/peerAccountCreator_test.go new file mode 100644 index 00000000000..4496bcdae3e --- /dev/null +++ b/data/state/factory/peerAccountCreator_test.go @@ -0,0 +1,55 @@ +package factory_test + +import ( + "testing" + + "github.com/ElrondNetwork/elrond-go/data/mock" + 
"github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/state/factory" + "github.com/stretchr/testify/assert" +) + +func TestPeerAccountCreator_CreateAccountNilAddress(t *testing.T) { + t.Parallel() + + accF, err := factory.NewAccountFactoryCreator(factory.ValidatorAccount) + assert.Nil(t, err) + + _, ok := accF.(*factory.PeerAccountCreator) + assert.Equal(t, true, ok) + + acc, err := accF.CreateAccount(nil, &mock.AccountTrackerStub{}) + + assert.Nil(t, acc) + assert.Equal(t, err, state.ErrNilAddressContainer) +} + +func TestPeerAccountCreator_CreateAccountNilAccountTraccer(t *testing.T) { + t.Parallel() + + accF, err := factory.NewAccountFactoryCreator(factory.ValidatorAccount) + assert.Nil(t, err) + + _, ok := accF.(*factory.PeerAccountCreator) + assert.Equal(t, true, ok) + + acc, err := accF.CreateAccount(&mock.AddressMock{}, nil) + + assert.Nil(t, acc) + assert.Equal(t, err, state.ErrNilAccountTracker) +} + +func TestPeerAccountCreator_CreateAccountOk(t *testing.T) { + t.Parallel() + + accF, err := factory.NewAccountFactoryCreator(factory.ValidatorAccount) + assert.Nil(t, err) + + _, ok := accF.(*factory.PeerAccountCreator) + assert.Equal(t, true, ok) + + acc, err := accF.CreateAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + + assert.NotNil(t, acc) + assert.Nil(t, err) +} diff --git a/data/state/peerAccount.go b/data/state/peerAccount.go new file mode 100644 index 00000000000..976149dbe79 --- /dev/null +++ b/data/state/peerAccount.go @@ -0,0 +1,369 @@ +package state + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/data" +) + +// TimeStamp is a moment defined by epoch and round +type TimeStamp struct { + Epoch uint64 + Round uint64 +} + +// TimePeriod holds start and end time +type TimePeriod struct { + StartTime TimeStamp + EndTime TimeStamp +} + +// SignRate is used to keep the number of success and failed signings +type SignRate struct { + NrSuccess uint32 + NrFailure uint32 +} + +// PeerAccount is the struct used in serialization/deserialization +type PeerAccount struct { + BLSPublicKey []byte + SchnorrPublicKey []byte + Address []byte + Stake *big.Int + + JailTime TimePeriod + PastJailTimes []TimePeriod + + CurrentShardId uint32 + NextShardId uint32 + NodeInWaitingList bool + + ValidatorSuccessRate SignRate + LeaderSuccessRate SignRate + + CodeHash []byte + + Rating uint32 + RootHash []byte + Nonce uint64 + + addressContainer AddressContainer + code []byte + accountTracker AccountTracker + dataTrieTracker DataTrieTracker +} + +// NewPeerAccount creates new simple account wrapper for an PeerAccountContainer (that has just been initialized) +func NewPeerAccount( + addressContainer AddressContainer, + tracker AccountTracker, +) (*PeerAccount, error) { + if addressContainer == nil { + return nil, ErrNilAddressContainer + } + if tracker == nil { + return nil, ErrNilAccountTracker + } + + return &PeerAccount{ + addressContainer: addressContainer, + accountTracker: tracker, + dataTrieTracker: NewTrackableDataTrie(nil), + }, nil +} + +// IsInterfaceNil return if there is no value under the interface +func (a *PeerAccount) IsInterfaceNil() bool { + if a == nil { + return true + } + return false +} + +// AddressContainer returns the address associated with the account +func (a *PeerAccount) AddressContainer() AddressContainer { + return a.addressContainer +} + +// SetNonceWithJournal sets the account's nonce, saving the old nonce before changing +func (a *PeerAccount) SetNonceWithJournal(nonce uint64) error { + entry, err := 
NewBaseJournalEntryNonce(a, a.Nonce) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.Nonce = nonce + + return a.accountTracker.SaveAccount(a) +} + +//SetNonce saves the nonce to the account +func (a *PeerAccount) SetNonce(nonce uint64) { + a.Nonce = nonce +} + +// GetNonce gets the nonce of the account +func (a *PeerAccount) GetNonce() uint64 { + return a.Nonce +} + +// GetCodeHash returns the code hash associated with this account +func (a *PeerAccount) GetCodeHash() []byte { + return a.CodeHash +} + +// SetCodeHash sets the code hash associated with the account +func (a *PeerAccount) SetCodeHash(codeHash []byte) { + a.CodeHash = codeHash +} + +// SetCodeHashWithJournal sets the account's code hash, saving the old code hash before changing +func (a *PeerAccount) SetCodeHashWithJournal(codeHash []byte) error { + entry, err := NewBaseJournalEntryCodeHash(a, a.CodeHash) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.CodeHash = codeHash + + return a.accountTracker.SaveAccount(a) +} + +// GetCode gets the actual code that needs to be run in the VM +func (a *PeerAccount) GetCode() []byte { + return a.code +} + +// SetCode sets the actual code that needs to be run in the VM +func (a *PeerAccount) SetCode(code []byte) { + a.code = code +} + +// GetRootHash returns the root hash associated with this account +func (a *PeerAccount) GetRootHash() []byte { + return a.RootHash +} + +// SetRootHash sets the root hash associated with the account +func (a *PeerAccount) SetRootHash(roothash []byte) { + a.RootHash = roothash +} + +// SetRootHashWithJournal sets the account's root hash, saving the old root hash before changing +func (a *PeerAccount) SetRootHashWithJournal(rootHash []byte) error { + entry, err := NewBaseJournalEntryRootHash(a, a.RootHash, a.DataTrie()) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.RootHash = rootHash + + return a.accountTracker.SaveAccount(a) +} + +// DataTrie returns the trie that holds the current account's data +func (a *PeerAccount) DataTrie() data.Trie { + return a.dataTrieTracker.DataTrie() +} + +// SetDataTrie sets the trie that holds the current account's data +func (a *PeerAccount) SetDataTrie(trie data.Trie) { + a.dataTrieTracker.SetDataTrie(trie) +} + +// DataTrieTracker returns the trie wrapper used in managing the SC data +func (a *PeerAccount) DataTrieTracker() DataTrieTracker { + return a.dataTrieTracker +} + +// SetAddressWithJournal sets the account's address, saving the old address before changing +func (a *PeerAccount) SetAddressWithJournal(address []byte) error { + if len(address) < 1 { + return ErrEmptyAddress + } + + entry, err := NewPeerJournalEntryAddress(a, a.Address) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.Address = address + + return a.accountTracker.SaveAccount(a) +} + +// SetSchnorrPublicKeyWithJournal sets the account's public key, saving the old key before changing +func (a *PeerAccount) SetSchnorrPublicKeyWithJournal(pubKey []byte) error { + if len(pubKey) < 1 { + return ErrNilSchnorrPublicKey + } + + entry, err := NewPeerJournalEntrySchnorrPublicKey(a, a.SchnorrPublicKey) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.SchnorrPublicKey = pubKey + + return a.accountTracker.SaveAccount(a) +} + +// SetBLSPublicKeyWithJournal sets the account's bls public key, saving the old key before changing +func (a *PeerAccount) SetBLSPublicKeyWithJournal(pubKey []byte) error { + if len(pubKey) < 1 { 
+ return ErrNilBLSPublicKey + } + + entry, err := NewPeerJournalEntryBLSPublicKey(a, a.BLSPublicKey) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.BLSPublicKey = pubKey + + return a.accountTracker.SaveAccount(a) +} + +// SetStakeWithJournal sets the account's stake, saving the old stake before changing +func (a *PeerAccount) SetStakeWithJournal(stake *big.Int) error { + if stake == nil { + return ErrNilStake + } + + entry, err := NewPeerJournalEntryStake(a, a.Stake) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.Stake = stake + + return a.accountTracker.SaveAccount(a) +} + +// SetJailTimeWithJournal sets the account's jail time, saving the old state before changing +func (a *PeerAccount) SetJailTimeWithJournal(jailTime TimePeriod) error { + entry, err := NewPeerJournalEntryJailTime(a, a.JailTime) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.JailTime = jailTime + + return a.accountTracker.SaveAccount(a) +} + +// SetCurrentShardIdWithJournal sets the account's shard id, saving the old state before changing +func (a *PeerAccount) SetCurrentShardIdWithJournal(shId uint32) error { + entry, err := NewPeerJournalEntryCurrentShardId(a, a.CurrentShardId) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.CurrentShardId = shId + + return a.accountTracker.SaveAccount(a) +} + +// SetNextShardIdWithJournal sets the account's shard id, saving the old state before changing +func (a *PeerAccount) SetNextShardIdWithJournal(shId uint32) error { + entry, err := NewPeerJournalEntryNextShardId(a, a.NextShardId) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.NextShardId = shId + + return a.accountTracker.SaveAccount(a) +} + +// SetNodeInWaitingListWithJournal sets the account's nodes status whether in waiting list, saving the old state before +func (a *PeerAccount) SetNodeInWaitingListWithJournal(nodeInWaitingList bool) error { + entry, err := NewPeerJournalEntryInWaitingList(a, a.NodeInWaitingList) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.NodeInWaitingList = nodeInWaitingList + + return a.accountTracker.SaveAccount(a) +} + +// IncreaseValidatorSuccessRateWithJournal increases the account's number of successful signing, +// saving the old state before changing +func (a *PeerAccount) IncreaseValidatorSuccessRateWithJournal() error { + entry, err := NewPeerJournalEntryValidatorSuccessRate(a, a.ValidatorSuccessRate) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.ValidatorSuccessRate.NrSuccess++ + + return a.accountTracker.SaveAccount(a) +} + +// DecreaseValidatorSuccessRateWithJournal increases the account's number of missed signing, +// saving the old state before changing +func (a *PeerAccount) DecreaseValidatorSuccessRateWithJournal() error { + entry, err := NewPeerJournalEntryValidatorSuccessRate(a, a.ValidatorSuccessRate) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.ValidatorSuccessRate.NrFailure++ + + return a.accountTracker.SaveAccount(a) +} + +// IncreaseLeaderSuccessRateWithJournal increases the account's number of successful signing, +// saving the old state before changing +func (a *PeerAccount) IncreaseLeaderSuccessRateWithJournal() error { + entry, err := NewPeerJournalEntryLeaderSuccessRate(a, a.LeaderSuccessRate) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.LeaderSuccessRate.NrSuccess++ + + return 
a.accountTracker.SaveAccount(a) +} + +// DecreaseLeaderSuccessRateWithJournal increases the account's number of missing signing, +// saving the old state before changing +func (a *PeerAccount) DecreaseLeaderSuccessRateWithJournal() error { + entry, err := NewPeerJournalEntryLeaderSuccessRate(a, a.LeaderSuccessRate) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.LeaderSuccessRate.NrFailure++ + + return a.accountTracker.SaveAccount(a) +} + +// SetRatingWithJournal sets the account's rating id, saving the old state before changing +func (a *PeerAccount) SetRatingWithJournal(rating uint32) error { + entry, err := NewPeerJournalEntryRating(a, a.Rating) + if err != nil { + return err + } + + a.accountTracker.Journalize(entry) + a.Rating = rating + + return a.accountTracker.SaveAccount(a) +} diff --git a/data/state/peerAccount_test.go b/data/state/peerAccount_test.go new file mode 100644 index 00000000000..8c762964d6d --- /dev/null +++ b/data/state/peerAccount_test.go @@ -0,0 +1,592 @@ +package state_test + +import ( + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data/mock" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/stretchr/testify/assert" +) + +func TestPeerAccount_MarshalUnmarshal_ShouldWork(t *testing.T) { + t.Parallel() + + addr := &mock.AddressMock{} + addrTr := &mock.AccountTrackerStub{} + acnt, _ := state.NewPeerAccount(addr, addrTr) + + marshalizer := mock.MarshalizerMock{} + buff, _ := marshalizer.Marshal(&acnt) + + acntRecovered, _ := state.NewPeerAccount(addr, addrTr) + _ = marshalizer.Unmarshal(acntRecovered, buff) + + assert.Equal(t, acnt, acntRecovered) +} + +func TestPeerAccount_NewAccountNilAddress(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(nil, &mock.AccountTrackerStub{}) + + assert.Nil(t, acc) + assert.Equal(t, err, state.ErrNilAddressContainer) +} + +func TestPeerAccount_NewPeerAccountNilAaccountTracker(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, nil) + + assert.Nil(t, acc) + assert.Equal(t, err, state.ErrNilAccountTracker) +} + +func TestPeerAccount_NewPeerAccountOk(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + + assert.NotNil(t, acc) + assert.Nil(t, err) +} + +func TestPeerAccount_AddressContainer(t *testing.T) { + t.Parallel() + + addr := &mock.AddressMock{} + acc, err := state.NewPeerAccount(addr, &mock.AccountTrackerStub{}) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, addr, acc.AddressContainer()) +} + +func TestPeerAccount_GetCode(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + assert.Nil(t, err) + + code := []byte("code") + acc.SetCode(code) + + assert.NotNil(t, acc) + assert.Equal(t, code, acc.GetCode()) +} + +func TestPeerAccount_GetCodeHash(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + assert.Nil(t, err) + + code := []byte("code") + acc.CodeHash = code + + assert.NotNil(t, acc) + assert.Equal(t, code, acc.GetCodeHash()) +} + +func TestPeerAccount_SetCodeHash(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + assert.Nil(t, err) + + code := []byte("code") + acc.SetCodeHash(code) + + assert.NotNil(t, acc) + assert.Equal(t, code, acc.GetCodeHash()) +} + +func TestPeerAccount_GetRootHash(t *testing.T) { + t.Parallel() + + acc, err := 
state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + assert.Nil(t, err) + + root := []byte("root") + acc.RootHash = root + + assert.NotNil(t, acc) + assert.Equal(t, root, acc.GetRootHash()) +} + +func TestPeerAccount_SetRootHash(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + assert.Nil(t, err) + + root := []byte("root") + acc.SetRootHash(root) + + assert.NotNil(t, acc) + assert.Equal(t, root, acc.GetRootHash()) +} + +func TestPeerAccount_DataTrie(t *testing.T) { + t.Parallel() + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, &mock.AccountTrackerStub{}) + assert.Nil(t, err) + + trie := &mock.TrieStub{} + acc.SetDataTrie(trie) + + assert.NotNil(t, acc) + assert.Equal(t, trie, acc.DataTrie()) +} + +func TestPeerAccount_SetNonceWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + nonce := uint64(0) + err = acc.SetNonceWithJournal(nonce) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, nonce, acc.Nonce) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetCodeHashWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + codeHash := []byte("codehash") + err = acc.SetCodeHashWithJournal(codeHash) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, codeHash, acc.CodeHash) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetRootHashWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + rootHash := []byte("roothash") + err = acc.SetRootHashWithJournal(rootHash) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, rootHash, acc.RootHash) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetAddressWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + address := []byte("address") + err = acc.SetAddressWithJournal(address) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, address, acc.Address) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, 
saveAccountCalled) +} + +func TestPeerAccount_SetSchnorrPublicKeyWithJournalWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + pubKey := []byte("pubkey") + err = acc.SetSchnorrPublicKeyWithJournal(pubKey) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, pubKey, acc.SchnorrPublicKey) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetBLSPublicKeyWithJournalWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + pubKey := []byte("pubkey") + err = acc.SetBLSPublicKeyWithJournal(pubKey) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, pubKey, acc.BLSPublicKey) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetStakeWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + stake := big.NewInt(250000) + err = acc.SetStakeWithJournal(stake) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, stake.Uint64(), acc.Stake.Uint64()) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetCurrentShardIdWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + shId := uint32(10) + err = acc.SetCurrentShardIdWithJournal(shId) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, shId, acc.CurrentShardId) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetNextShardIdWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + shId := uint32(10) + err = acc.SetNextShardIdWithJournal(shId) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, shId, acc.NextShardId) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) 
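
As a usage sketch (assuming state.JournalEntry exposes the Revert method implemented by the peer journal entries in this change), the journaled setters follow a Journalize, mutate, SaveAccount order, so a caller that records the emitted entries can roll a change back by reverting them in reverse order. The tracker below reuses the AccountTrackerStub from the tests; a real caller would plug in the accounts adapter instead:

package stateexample

import (
	"math/big"

	"github.com/ElrondNetwork/elrond-go/data/mock"
	"github.com/ElrondNetwork/elrond-go/data/state"
)

// stakeWithRollback applies a stake change through the journal and then
// reverts it, illustrating the Journalize -> mutate -> SaveAccount order.
func stakeWithRollback() error {
	var journal []state.JournalEntry
	tracker := &mock.AccountTrackerStub{
		JournalizeCalled: func(entry state.JournalEntry) {
			journal = append(journal, entry)
		},
		SaveAccountCalled: func(accountHandler state.AccountHandler) error {
			return nil
		},
	}

	acc, err := state.NewPeerAccount(mock.NewAddressMock(), tracker)
	if err != nil {
		return err
	}

	if err = acc.SetStakeWithJournal(big.NewInt(250000)); err != nil {
		return err
	}

	// Undo: each entry restores the value captured before the change.
	for i := len(journal) - 1; i >= 0; i-- {
		if _, err = journal[i].Revert(); err != nil {
			return err
		}
	}

	return nil
}
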
+} + +func TestPeerAccount_SetNodeInWaitingListWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + err = acc.SetNodeInWaitingListWithJournal(true) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, true, acc.NodeInWaitingList) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetRatingWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + rating := uint32(10) + err = acc.SetRatingWithJournal(rating) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, rating, acc.Rating) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_SetJailTimeWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + jailTime := state.TimePeriod{ + StartTime: state.TimeStamp{Epoch: 12, Round: 12}, + EndTime: state.TimeStamp{Epoch: 13, Round: 13}, + } + err = acc.SetJailTimeWithJournal(jailTime) + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, jailTime, acc.JailTime) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_IncreaseLeaderSuccessRateWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + acc.LeaderSuccessRate = state.SignRate{NrSuccess: 10, NrFailure: 10} + err = acc.IncreaseLeaderSuccessRateWithJournal() + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, uint32(11), acc.LeaderSuccessRate.NrSuccess) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_IncreaseValidatorSuccessRateWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + acc.ValidatorSuccessRate = state.SignRate{NrSuccess: 10, NrFailure: 10} + err = acc.IncreaseValidatorSuccessRateWithJournal() 
+ + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, uint32(11), acc.ValidatorSuccessRate.NrSuccess) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_DecreaseLeaderSuccessRateWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + acc.LeaderSuccessRate = state.SignRate{NrSuccess: 10, NrFailure: 10} + err = acc.DecreaseLeaderSuccessRateWithJournal() + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, uint32(11), acc.LeaderSuccessRate.NrFailure) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} + +func TestPeerAccount_DecreaseValidatorSuccessRateWithJournal(t *testing.T) { + t.Parallel() + + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acc, err := state.NewPeerAccount(&mock.AddressMock{}, tracker) + assert.Nil(t, err) + + acc.ValidatorSuccessRate = state.SignRate{NrSuccess: 10, NrFailure: 10} + err = acc.DecreaseValidatorSuccessRateWithJournal() + + assert.NotNil(t, acc) + assert.Nil(t, err) + assert.Equal(t, uint32(11), acc.ValidatorSuccessRate.NrFailure) + assert.Equal(t, 1, journalizeCalled) + assert.Equal(t, 1, saveAccountCalled) +} diff --git a/data/state/peerAccountsDB.go b/data/state/peerAccountsDB.go new file mode 100644 index 00000000000..0dd6b35c4b8 --- /dev/null +++ b/data/state/peerAccountsDB.go @@ -0,0 +1,6 @@ +package state + +// peerAccountsDB will save and synchronize data from peer processor, plus will synchronize with nodesCoordinator +type peerAccountsDB struct { + *AccountsDB +} diff --git a/data/state/peerJournalEntries.go b/data/state/peerJournalEntries.go new file mode 100644 index 00000000000..84251b32aee --- /dev/null +++ b/data/state/peerJournalEntries.go @@ -0,0 +1,386 @@ +package state + +import "math/big" + +//------- PeerJournalEntryAddress + +// PeerJournalEntryAddress is used to revert a round change +type PeerJournalEntryAddress struct { + account *PeerAccount + oldAddress []byte +} + +// NewPeerJournalEntryAddress outputs a new PeerJournalEntry implementation used to revert a round change +func NewPeerJournalEntryAddress(account *PeerAccount, oldAddress []byte) (*PeerJournalEntryAddress, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryAddress{ + account: account, + oldAddress: oldAddress, + }, nil +} + +// Revert applies undo operation +func (pje *PeerJournalEntryAddress) Revert() (AccountHandler, error) { + pje.account.Address = pje.oldAddress + + return pje.account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pje *PeerJournalEntryAddress) IsInterfaceNil() bool { + if pje == nil { + return true + } + return false +} + +//------- PeerJournalEntrySchnorrPublicKey + +// PeerJournalEntrySchnorrPublicKey is used to revert a round change +type PeerJournalEntrySchnorrPublicKey struct { + account *PeerAccount + oldSchnorrPubKey []byte +} + +// 
NewPeerJournalEntrySchnorrPublicKey outputs a new PeerJournalEntrySchnorrPublicKey implementation used to revert a round change +func NewPeerJournalEntrySchnorrPublicKey( + account *PeerAccount, + oldSchnorrPubKey []byte, +) (*PeerJournalEntrySchnorrPublicKey, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntrySchnorrPublicKey{ + account: account, + oldSchnorrPubKey: oldSchnorrPubKey, + }, nil +} + +// Revert applies undo operation +func (jens *PeerJournalEntrySchnorrPublicKey) Revert() (AccountHandler, error) { + jens.account.SchnorrPublicKey = jens.oldSchnorrPubKey + + return jens.account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (jens *PeerJournalEntrySchnorrPublicKey) IsInterfaceNil() bool { + if jens == nil { + return true + } + return false +} + +//------- PeerJournalEntryBLSPublicKey + +// PeerJournalEntryBLSPublicKey is used to revert a round change +type PeerJournalEntryBLSPublicKey struct { + account *PeerAccount + oldBLSPubKey []byte +} + +// NewPeerJournalEntryBLSPublicKey outputs a new PeerJournalEntryBLSPublicKey implementation used to revert a round change +func NewPeerJournalEntryBLSPublicKey(account *PeerAccount, oldBLSPubKey []byte) (*PeerJournalEntryBLSPublicKey, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryBLSPublicKey{ + account: account, + oldBLSPubKey: oldBLSPubKey, + }, nil +} + +// Revert applies undo operation +func (pjeb *PeerJournalEntryBLSPublicKey) Revert() (AccountHandler, error) { + pjeb.account.BLSPublicKey = pjeb.oldBLSPubKey + + return pjeb.account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pjeb *PeerJournalEntryBLSPublicKey) IsInterfaceNil() bool { + if pjeb == nil { + return true + } + return false +} + +//------- PeerJournalEntryStake + +// PeerJournalEntryStake is used to revert a stake change +type PeerJournalEntryStake struct { + account *PeerAccount + oldStake *big.Int +} + +// NewPeerJournalEntryStake outputs a new PeerJournalEntryStake implementation used to revert a stake change +func NewPeerJournalEntryStake(account *PeerAccount, oldStake *big.Int) (*PeerJournalEntryStake, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryStake{ + account: account, + oldStake: oldStake, + }, nil +} + +// Revert applies undo operation +func (pjes *PeerJournalEntryStake) Revert() (AccountHandler, error) { + pjes.account.Stake = pjes.oldStake + + return pjes.account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pjes *PeerJournalEntryStake) IsInterfaceNil() bool { + if pjes == nil { + return true + } + return false +} + +// PeerJournalEntryJailTime is used to revert a balance change +type PeerJournalEntryJailTime struct { + account *PeerAccount + oldJailTime TimePeriod +} + +// NewPeerJournalEntryJailTime outputs a new PeerJournalEntryJailTime implementation used to revert a state change +func NewPeerJournalEntryJailTime(account *PeerAccount, oldJailTime TimePeriod) (*PeerJournalEntryJailTime, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryJailTime{ + account: account, + oldJailTime: oldJailTime, + }, nil +} + +// Revert applies undo operation +func (pjej *PeerJournalEntryJailTime) Revert() (AccountHandler, error) { + pjej.account.JailTime = pjej.oldJailTime + + return pjej.account, nil +} + +// IsInterfaceNil returns true if there is 
no value under the interface +func (pjej *PeerJournalEntryJailTime) IsInterfaceNil() bool { + if pjej == nil { + return true + } + return false +} + +// PeerJournalEntryCurrentShardId is used to revert a shardId change +type PeerJournalEntryCurrentShardId struct { + account *PeerAccount + oldShardId uint32 +} + +// NewPeerJournalEntryCurrentShardId outputs a new PeerJournalEntryCurrentShardId implementation used to revert a state change +func NewPeerJournalEntryCurrentShardId(account *PeerAccount, oldShardId uint32) (*PeerJournalEntryCurrentShardId, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryCurrentShardId{ + account: account, + oldShardId: oldShardId, + }, nil +} + +// Revert applies undo operation +func (pjec *PeerJournalEntryCurrentShardId) Revert() (AccountHandler, error) { + pjec.account.CurrentShardId = pjec.oldShardId + + return pjec.account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pjec *PeerJournalEntryCurrentShardId) IsInterfaceNil() bool { + if pjec == nil { + return true + } + return false +} + +// PeerJournalEntryNextShardId is used to revert a shardId change +type PeerJournalEntryNextShardId struct { + account *PeerAccount + oldShardId uint32 +} + +// NewPeerJournalEntryNextShardId outputs a new PeerJournalEntryNextShardId implementation used to revert a state change +func NewPeerJournalEntryNextShardId(account *PeerAccount, oldShardId uint32) (*PeerJournalEntryNextShardId, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryNextShardId{ + account: account, + oldShardId: oldShardId, + }, nil +} + +// Revert applies undo operation +func (pjen *PeerJournalEntryNextShardId) Revert() (AccountHandler, error) { + pjen.account.NextShardId = pjen.oldShardId + + return pjen.account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pjen *PeerJournalEntryNextShardId) IsInterfaceNil() bool { + if pjen == nil { + return true + } + return false +} + +// PeerJournalEntryInWaitingList is used to revert a shardId change +type PeerJournalEntryInWaitingList struct { + account *PeerAccount + oldNodeInWaitingList bool +} + +// NewPeerJournalEntryInWaitingList outputs a new PeerJournalEntryInWaitingList implementation used to revert a state change +func NewPeerJournalEntryInWaitingList( + account *PeerAccount, + oldNodeInWaitingList bool, +) (*PeerJournalEntryInWaitingList, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryInWaitingList{ + account: account, + oldNodeInWaitingList: oldNodeInWaitingList, + }, nil +} + +// Revert applies undo operation +func (pjew *PeerJournalEntryInWaitingList) Revert() (AccountHandler, error) { + pjew.account.NodeInWaitingList = pjew.oldNodeInWaitingList + + return pjew.account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pjew *PeerJournalEntryInWaitingList) IsInterfaceNil() bool { + if pjew == nil { + return true + } + return false +} + +// PeerJournalEntryValidatorSuccessRate is used to revert a success rate change +type PeerJournalEntryValidatorSuccessRate struct { + account *PeerAccount + oldValidatorSuccessRate SignRate +} + +// NewPeerJournalEntryValidatorSuccessRate outputs a new PeerJournalEntryValidatorSuccessRate implementation used to revert a state change +func NewPeerJournalEntryValidatorSuccessRate( + account *PeerAccount, + oldValidatorSuccessRate SignRate, +) 
(*PeerJournalEntryValidatorSuccessRate, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryValidatorSuccessRate{ + account: account, + oldValidatorSuccessRate: oldValidatorSuccessRate, + }, nil +} + +// Revert applies undo operation +func (pjev *PeerJournalEntryValidatorSuccessRate) Revert() (AccountHandler, error) { + pjev.account.ValidatorSuccessRate = pjev.oldValidatorSuccessRate + + return pjev.account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pjev *PeerJournalEntryValidatorSuccessRate) IsInterfaceNil() bool { + if pjev == nil { + return true + } + return false +} + +// PeerJournalEntryLeaderSuccessRate is used to revert a success rate change +type PeerJournalEntryLeaderSuccessRate struct { + account *PeerAccount + oldLeaderSuccessRate SignRate +} + +// NewPeerJournalEntryLeaderSuccessRate outputs a new PeerJournalEntryLeaderSuccessRate implementation used to revert a state change +func NewPeerJournalEntryLeaderSuccessRate( + account *PeerAccount, + oldLeaderSuccessRate SignRate, +) (*PeerJournalEntryLeaderSuccessRate, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryLeaderSuccessRate{ + account: account, + oldLeaderSuccessRate: oldLeaderSuccessRate, + }, nil +} + +// Revert applies undo operation +func (pjel *PeerJournalEntryLeaderSuccessRate) Revert() (AccountHandler, error) { + pjel.account.LeaderSuccessRate = pjel.oldLeaderSuccessRate + + return pjel.account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pjel *PeerJournalEntryLeaderSuccessRate) IsInterfaceNil() bool { + if pjel == nil { + return true + } + return false +} + +// PeerJournalEntryRating is used to revert a rating change +type PeerJournalEntryRating struct { + account *PeerAccount + oldRating uint32 +} + +// NewPeerJournalEntryRating outputs a new PeerJournalEntryRating implementation used to revert a state change +func NewPeerJournalEntryRating(account *PeerAccount, oldRating uint32) (*PeerJournalEntryRating, error) { + if account == nil { + return nil, ErrNilAccountHandler + } + + return &PeerJournalEntryRating{ + account: account, + oldRating: oldRating, + }, nil +} + +// Revert applies undo operation +func (pjer *PeerJournalEntryRating) Revert() (AccountHandler, error) { + pjer.account.Rating = pjer.oldRating + + return pjer.account, nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (pjer *PeerJournalEntryRating) IsInterfaceNil() bool { + if pjer == nil { + return true + } + return false +} diff --git a/data/state/peerJournalEntries_test.go b/data/state/peerJournalEntries_test.go new file mode 100644 index 00000000000..06f5edf097d --- /dev/null +++ b/data/state/peerJournalEntries_test.go @@ -0,0 +1,360 @@ +package state_test + +import ( + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data/mock" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/stretchr/testify/assert" +) + +func TestPeerJournalEntryAddress_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryAddress(nil, nil) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryAddress_ShouldWork(t *testing.T) { + t.Parallel() + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryAddress(accnt, []byte("address")) + + assert.NotNil(t, entry) + 
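
As a usage note, the Increase/Decrease success-rate methods and SetRatingWithJournal are the hooks a validator-statistics component would call once per round. The helper below is hypothetical and only sketches the intended call pattern over the methods introduced above:

package stateexample

import "github.com/ElrondNetwork/elrond-go/data/state"

// updateRoundStats is a hypothetical helper: it bumps the journaled counters
// for a peer account after a round, according to the role the node played and
// whether it contributed a valid signature.
func updateRoundStats(acc *state.PeerAccount, wasLeader bool, signedOk bool) error {
	if wasLeader {
		if signedOk {
			return acc.IncreaseLeaderSuccessRateWithJournal()
		}
		return acc.DecreaseLeaderSuccessRateWithJournal()
	}

	if signedOk {
		return acc.IncreaseValidatorSuccessRateWithJournal()
	}
	return acc.DecreaseValidatorSuccessRateWithJournal()
}
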
assert.Nil(t, err) +} + +func TestPeerJournalEntryAddress_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + shardRootHash := []byte("address") + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryAddress(accnt, shardRootHash) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, shardRootHash, accnt.Address) +} + +func TestPeerJournalEntrySchnorrPublicKey_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntrySchnorrPublicKey(nil, nil) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntrySchnorrPublicKey_ShouldWork(t *testing.T) { + t.Parallel() + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntrySchnorrPublicKey(accnt, []byte("address")) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntrySchnorrPublicKey_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + shardRootHash := []byte("address") + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntrySchnorrPublicKey(accnt, shardRootHash) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, shardRootHash, accnt.SchnorrPublicKey) +} + +func TestPeerJournalEntryBLSPublicKey_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryBLSPublicKey(nil, nil) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryBLSPublicKey_ShouldWork(t *testing.T) { + t.Parallel() + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryBLSPublicKey(accnt, []byte("address")) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryBLSPublicKey_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + shardRootHash := []byte("address") + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryBLSPublicKey(accnt, shardRootHash) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, shardRootHash, accnt.BLSPublicKey) +} + +func TestPeerJournalEntryStake_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryStake(nil, nil) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryStake_ShouldWork(t *testing.T) { + t.Parallel() + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryStake(accnt, big.NewInt(9)) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryStake_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + stake := big.NewInt(999) + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryStake(accnt, stake) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, stake.Uint64(), accnt.Stake.Uint64()) +} + +func TestPeerJournalEntryJailTime_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + startTime := state.TimeStamp{Round: 10, Epoch: 10} + endTime := state.TimeStamp{Round: 11, Epoch: 10} + jailTime := state.TimePeriod{StartTime: startTime, EndTime: endTime} + + entry, err := state.NewPeerJournalEntryJailTime(nil, jailTime) + + assert.Nil(t, entry) + 
assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryJailTime_ShouldWork(t *testing.T) { + t.Parallel() + + startTime := state.TimeStamp{Round: 10, Epoch: 10} + endTime := state.TimeStamp{Round: 11, Epoch: 10} + jailTime := state.TimePeriod{StartTime: startTime, EndTime: endTime} + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryJailTime(accnt, jailTime) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryJailTime_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + startTime := state.TimeStamp{Round: 10, Epoch: 10} + endTime := state.TimeStamp{Round: 11, Epoch: 10} + jailTime := state.TimePeriod{StartTime: startTime, EndTime: endTime} + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryJailTime(accnt, jailTime) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, jailTime, accnt.JailTime) +} + +func TestPeerJournalEntryCurrentShardId_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryCurrentShardId(nil, 0) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryCurrentShardId_ShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryCurrentShardId(accnt, 0) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryCurrentShardId_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryCurrentShardId(accnt, 10) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, uint32(10), accnt.CurrentShardId) +} + +func TestPeerJournalEntryNextShardId_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryNextShardId(nil, 0) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryNextShardId_ShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryNextShardId(accnt, 0) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryNextShardId_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryNextShardId(accnt, 10) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, uint32(10), accnt.NextShardId) +} + +func TestPeerJournalEntryInWaitingList_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryInWaitingList(nil, true) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryInWaitingList_ShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryInWaitingList(accnt, true) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryInWaitingList_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := 
state.NewPeerJournalEntryInWaitingList(accnt, true) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.True(t, accnt.NodeInWaitingList) +} + +func TestPeerJournalEntryValidatorSuccessRate_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + successRate := state.SignRate{NrFailure: 10, NrSuccess: 10} + + entry, err := state.NewPeerJournalEntryValidatorSuccessRate(nil, successRate) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryValidatorSuccessRate_ShouldWork(t *testing.T) { + t.Parallel() + + successRate := state.SignRate{NrFailure: 10, NrSuccess: 10} + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryValidatorSuccessRate(accnt, successRate) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryValidatorSuccessRate_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + successRate := state.SignRate{NrFailure: 10, NrSuccess: 10} + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryValidatorSuccessRate(accnt, successRate) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, successRate, accnt.ValidatorSuccessRate) +} + +func TestPeerJournalEntryLeaderSuccessRate_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + successRate := state.SignRate{NrFailure: 10, NrSuccess: 10} + + entry, err := state.NewPeerJournalEntryLeaderSuccessRate(nil, successRate) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryLeaderSuccessRate_ShouldWork(t *testing.T) { + t.Parallel() + + successRate := state.SignRate{NrFailure: 10, NrSuccess: 10} + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryLeaderSuccessRate(accnt, successRate) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryLeaderSuccessRate_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + successRate := state.SignRate{NrFailure: 10, NrSuccess: 10} + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryLeaderSuccessRate(accnt, successRate) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, successRate, accnt.LeaderSuccessRate) +} + +func TestPeerJournalEntryRating_NilAccountShouldErr(t *testing.T) { + t.Parallel() + + entry, err := state.NewPeerJournalEntryRating(nil, 10) + + assert.Nil(t, entry) + assert.Equal(t, state.ErrNilAccountHandler, err) +} + +func TestPeerJournalEntryRating_ShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, err := state.NewPeerJournalEntryRating(accnt, 10) + + assert.NotNil(t, entry) + assert.Nil(t, err) +} + +func TestPeerJournalEntryRating_RevertOkValsShouldWork(t *testing.T) { + t.Parallel() + + accnt, _ := state.NewPeerAccount(mock.NewAddressMock(), &mock.AccountTrackerStub{}) + entry, _ := state.NewPeerJournalEntryRating(accnt, 10) + _, err := entry.Revert() + + assert.Nil(t, err) + assert.Equal(t, uint32(10), accnt.Rating) +} diff --git a/dataRetriever/dataPool/shardDataPool.go b/dataRetriever/dataPool/shardDataPool.go index af17fdcccab..f87f3cfd726 100644 --- a/dataRetriever/dataPool/shardDataPool.go +++ b/dataRetriever/dataPool/shardDataPool.go @@ -8,6 +8,7 @@ import ( type shardedDataPool struct { transactions 
dataRetriever.ShardedDataCacherNotifier unsignedTransactions dataRetriever.ShardedDataCacherNotifier + rewardTransactions dataRetriever.ShardedDataCacherNotifier headers storage.Cacher metaBlocks storage.Cacher headersNonces dataRetriever.Uint64SyncMapCacher @@ -19,6 +20,7 @@ type shardedDataPool struct { func NewShardedDataPool( transactions dataRetriever.ShardedDataCacherNotifier, unsignedTransactions dataRetriever.ShardedDataCacherNotifier, + rewardTransactions dataRetriever.ShardedDataCacherNotifier, headers storage.Cacher, headersNonces dataRetriever.Uint64SyncMapCacher, miniBlocks storage.Cacher, @@ -32,6 +34,9 @@ func NewShardedDataPool( if unsignedTransactions == nil || unsignedTransactions.IsInterfaceNil() { return nil, dataRetriever.ErrNilUnsignedTransactionPool } + if rewardTransactions == nil || rewardTransactions.IsInterfaceNil() { + return nil, dataRetriever.ErrNilRewardTransactionPool + } if headers == nil || headers.IsInterfaceNil() { return nil, dataRetriever.ErrNilHeadersDataPool } @@ -51,6 +56,7 @@ func NewShardedDataPool( return &shardedDataPool{ transactions: transactions, unsignedTransactions: unsignedTransactions, + rewardTransactions: rewardTransactions, headers: headers, headersNonces: headersNonces, miniBlocks: miniBlocks, @@ -69,6 +75,11 @@ func (tdp *shardedDataPool) UnsignedTransactions() dataRetriever.ShardedDataCach return tdp.unsignedTransactions } +// RewardTransactions returns the holder for reward transactions (cross shard result entities) +func (tdp *shardedDataPool) RewardTransactions() dataRetriever.ShardedDataCacherNotifier { + return tdp.rewardTransactions +} + // Headers returns the holder for headers func (tdp *shardedDataPool) Headers() storage.Cacher { return tdp.headers diff --git a/dataRetriever/dataPool/shardDataPool_test.go b/dataRetriever/dataPool/shardDataPool_test.go index 4b47d68cfa1..14882486d01 100644 --- a/dataRetriever/dataPool/shardDataPool_test.go +++ b/dataRetriever/dataPool/shardDataPool_test.go @@ -15,6 +15,7 @@ func TestNewShardedDataPool_NilTransactionsShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( nil, &mock.ShardedDataStub{}, + &mock.ShardedDataStub{}, &mock.CacherStub{}, &mock.Uint64SyncMapCacherStub{}, &mock.CacherStub{}, @@ -30,6 +31,7 @@ func TestNewShardedDataPool_NilUnsignedTransactionsShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( &mock.ShardedDataStub{}, nil, + &mock.ShardedDataStub{}, &mock.CacherStub{}, &mock.Uint64SyncMapCacherStub{}, &mock.CacherStub{}, @@ -41,8 +43,25 @@ func TestNewShardedDataPool_NilUnsignedTransactionsShouldErr(t *testing.T) { assert.Nil(t, tdp) } +func TestNewShardedDataPool_NilRewardTransactionsShouldErr(t *testing.T) { + tdp, err := dataPool.NewShardedDataPool( + &mock.ShardedDataStub{}, + &mock.ShardedDataStub{}, + nil, + &mock.CacherStub{}, + &mock.Uint64SyncMapCacherStub{}, + &mock.CacherStub{}, + &mock.CacherStub{}, + &mock.CacherStub{}, + ) + + assert.Equal(t, dataRetriever.ErrNilRewardTransactionPool, err) + assert.Nil(t, tdp) +} + func TestNewShardedDataPool_NilHeadersShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( + &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, nil, @@ -58,6 +77,7 @@ func TestNewShardedDataPool_NilHeadersShouldErr(t *testing.T) { func TestNewShardedDataPool_NilHeaderNoncesShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( + &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.CacherStub{}, @@ -73,6 +93,7 @@ func 
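
A construction sketch for the extended pool, assuming the argument order of the last two caches follows the order of the nil-check tests above (peer change blocks, then meta blocks); the mock stubs stand in for the real sharded caches a node would configure:

package datapoolexample

import (
	"github.com/ElrondNetwork/elrond-go/dataRetriever"
	"github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool"
	"github.com/ElrondNetwork/elrond-go/dataRetriever/mock"
)

// newPoolWithRewardTxs shows the extra constructor argument: the reward
// transaction cache sits right after the unsigned transaction cache.
func newPoolWithRewardTxs() (dataRetriever.PoolsHolder, error) {
	pool, err := dataPool.NewShardedDataPool(
		&mock.ShardedDataStub{},         // transactions
		&mock.ShardedDataStub{},         // unsigned transactions (smart contract results)
		&mock.ShardedDataStub{},         // reward transactions (the new argument)
		&mock.CacherStub{},              // headers
		&mock.Uint64SyncMapCacherStub{}, // headers nonces
		&mock.CacherStub{},              // mini blocks
		&mock.CacherStub{},              // peer change blocks (assumed position)
		&mock.CacherStub{},              // meta blocks (assumed position)
	)
	if err != nil {
		return nil, err
	}

	// The new getter exposes the reward transaction cache to processors.
	_ = pool.RewardTransactions()

	return pool, nil
}
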
TestNewShardedDataPool_NilHeaderNoncesShouldErr(t *testing.T) { func TestNewShardedDataPool_NilTxBlocksShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( + &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.CacherStub{}, @@ -88,6 +109,7 @@ func TestNewShardedDataPool_NilTxBlocksShouldErr(t *testing.T) { func TestNewShardedDataPool_NilPeerBlocksShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( + &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.CacherStub{}, @@ -103,6 +125,7 @@ func TestNewShardedDataPool_NilPeerBlocksShouldErr(t *testing.T) { func TestNewShardedDataPool_NilMetaBlocksShouldErr(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( + &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.ShardedDataStub{}, &mock.CacherStub{}, @@ -119,6 +142,7 @@ func TestNewShardedDataPool_NilMetaBlocksShouldErr(t *testing.T) { func TestNewShardedDataPool_OkValsShouldWork(t *testing.T) { transactions := &mock.ShardedDataStub{} scResults := &mock.ShardedDataStub{} + rewardTransactions := &mock.ShardedDataStub{} headers := &mock.CacherStub{} headerNonces := &mock.Uint64SyncMapCacherStub{} txBlocks := &mock.CacherStub{} @@ -127,6 +151,7 @@ func TestNewShardedDataPool_OkValsShouldWork(t *testing.T) { tdp, err := dataPool.NewShardedDataPool( transactions, scResults, + rewardTransactions, headers, headerNonces, txBlocks, @@ -137,6 +162,8 @@ func TestNewShardedDataPool_OkValsShouldWork(t *testing.T) { assert.Nil(t, err) //pointer checking assert.True(t, transactions == tdp.Transactions()) + assert.True(t, scResults == tdp.UnsignedTransactions()) + assert.True(t, rewardTransactions == tdp.RewardTransactions()) assert.True(t, headers == tdp.Headers()) assert.True(t, headerNonces == tdp.HeadersNonces()) assert.True(t, txBlocks == tdp.MiniBlocks()) diff --git a/dataRetriever/errors.go b/dataRetriever/errors.go index 627f318ff13..5bcba87ff98 100644 --- a/dataRetriever/errors.go +++ b/dataRetriever/errors.go @@ -40,8 +40,8 @@ var ErrNilPublicKey = errors.New("nil public key") // ErrNilSignature signals that a operation has been attempted with a nil signature var ErrNilSignature = errors.New("nil signature") -// ErrNilMiniBlocks signals that an operation has been attempted with a nil mini-block -var ErrNilMiniBlocks = errors.New("nil mini blocks") +// ErrEmptyMiniBlockSlice signals that an operation has been attempted with an empty mini block slice +var ErrEmptyMiniBlockSlice = errors.New("empty mini block slice") // ErrInvalidShardId signals that the shard id is invalid var ErrInvalidShardId = errors.New("invalid shard id") @@ -64,6 +64,9 @@ var ErrNilTxDataPool = errors.New("nil transaction data pool") // ErrNilUnsignedTransactionPool signals that a nil unsigned transactions pool has been provided var ErrNilUnsignedTransactionPool = errors.New("nil unsigned transactions data pool") +// ErrNilRewardTransactionPool signals that a nil reward transactions pool has been provided +var ErrNilRewardTransactionPool = errors.New("nil reward transaction data pool") + // ErrNilHeadersDataPool signals that a nil header pool has been provided var ErrNilHeadersDataPool = errors.New("nil headers data pool") @@ -176,6 +179,9 @@ var ErrEmptyTxRequestTopic = errors.New("empty transaction request topic") // ErrEmptyScrRequestTopic signals that an empty smart contract result topic has been provided var ErrEmptyScrRequestTopic = errors.New("empty smart contract result request topic") +// ErrEmptyRewardTxRequestTopic signals that 
an empty reward transaction topic has been provided +var ErrEmptyRewardTxRequestTopic = errors.New("empty rewards transactions request topic") + // ErrEmptyMiniBlockRequestTopic signals that an empty miniblock topic has been provided var ErrEmptyMiniBlockRequestTopic = errors.New("empty miniblock request topic") diff --git a/dataRetriever/factory/shard/resolversContainerFactory.go b/dataRetriever/factory/shard/resolversContainerFactory.go index 757f9539353..eba1026d37d 100644 --- a/dataRetriever/factory/shard/resolversContainerFactory.go +++ b/dataRetriever/factory/shard/resolversContainerFactory.go @@ -74,7 +74,11 @@ func NewResolversContainerFactory( func (rcf *resolversContainerFactory) Create() (dataRetriever.ResolversContainer, error) { container := containers.NewResolversContainer() - keys, resolverSlice, err := rcf.generateTxResolvers(factory.TransactionTopic, dataRetriever.TransactionUnit, rcf.dataPools.Transactions()) + keys, resolverSlice, err := rcf.generateTxResolvers( + factory.TransactionTopic, + dataRetriever.TransactionUnit, + rcf.dataPools.Transactions(), + ) if err != nil { return nil, err } @@ -96,6 +100,20 @@ func (rcf *resolversContainerFactory) Create() (dataRetriever.ResolversContainer return nil, err } + keys, resolverSlice, err = rcf.generateTxResolvers( + factory.RewardsTransactionTopic, + dataRetriever.RewardTransactionUnit, + rcf.dataPools.RewardTransactions(), + ) + if err != nil { + return nil, err + } + + err = container.AddMultiple(keys, resolverSlice) + if err != nil { + return nil, err + } + keys, resolverSlice, err = rcf.generateHdrResolver() if err != nil { return nil, err diff --git a/dataRetriever/factory/shard/resolversContainerFactory_test.go b/dataRetriever/factory/shard/resolversContainerFactory_test.go index 272c76093fa..aee73e043d8 100644 --- a/dataRetriever/factory/shard/resolversContainerFactory_test.go +++ b/dataRetriever/factory/shard/resolversContainerFactory_test.go @@ -69,6 +69,9 @@ func createDataPools() dataRetriever.PoolsHolder { pools.UnsignedTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{} } + pools.RewardTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } return pools } @@ -415,13 +418,14 @@ func TestResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { numResolverSCRs := noOfShards numResolverTxs := noOfShards + numResolverRewardTxs := noOfShards numResolverHeaders := 1 numResolverMiniBlocks := noOfShards numResolverPeerChanges := 1 numResolverMetachainShardHeaders := 1 numResolverMetaBlockHeaders := 1 totalResolvers := numResolverTxs + numResolverHeaders + numResolverMiniBlocks + numResolverPeerChanges + - numResolverMetachainShardHeaders + numResolverMetaBlockHeaders + numResolverSCRs + numResolverMetachainShardHeaders + numResolverMetaBlockHeaders + numResolverSCRs + numResolverRewardTxs assert.Equal(t, totalResolvers, container.Len()) } diff --git a/dataRetriever/interface.go b/dataRetriever/interface.go index df741bdc544..ea44f301cae 100644 --- a/dataRetriever/interface.go +++ b/dataRetriever/interface.go @@ -28,8 +28,12 @@ const ( MetaPeerDataUnit UnitType = 6 // UnsignedTransactionUnit is the unsigned transaction unit identifier UnsignedTransactionUnit UnitType = 7 + // RewardTransactionUnit is the reward transaction unit identifier + RewardTransactionUnit UnitType = 8 // MetaHdrNonceHashDataUnit is the meta header nonce-hash pair data unit identifier - MetaHdrNonceHashDataUnit UnitType = 8 + 
MetaHdrNonceHashDataUnit UnitType = 9 + // HeartbeatUnit is the heartbeat storage unit identifier + HeartbeatUnit UnitType = 10 // ShardHdrNonceHashDataUnit is the header nonce-hash pair data unit identifier //TODO: Add only unit types lower than 100 @@ -56,7 +60,8 @@ type HeaderResolver interface { type MiniBlocksResolver interface { Resolver RequestDataFromHashArray(hashes [][]byte) error - GetMiniBlocks(hashes [][]byte) block.MiniBlockSlice // TODO miniblockresolver should not know about miniblockslice + GetMiniBlocks(hashes [][]byte) (block.MiniBlockSlice, [][]byte) + GetMiniBlocksFromPool(hashes [][]byte) (block.MiniBlockSlice, [][]byte) } // TopicResolverSender defines what sending operations are allowed for a topic resolver @@ -196,6 +201,7 @@ type Uint64SyncMapCacher interface { type PoolsHolder interface { Transactions() ShardedDataCacherNotifier UnsignedTransactions() ShardedDataCacherNotifier + RewardTransactions() ShardedDataCacherNotifier Headers() storage.Cacher HeadersNonces() Uint64SyncMapCacher MiniBlocks() storage.Cacher diff --git a/dataRetriever/mock/poolsHolderStub.go b/dataRetriever/mock/poolsHolderStub.go index 43599982ea8..d189b57d055 100644 --- a/dataRetriever/mock/poolsHolderStub.go +++ b/dataRetriever/mock/poolsHolderStub.go @@ -11,6 +11,7 @@ type PoolsHolderStub struct { PeerChangesBlocksCalled func() storage.Cacher TransactionsCalled func() dataRetriever.ShardedDataCacherNotifier UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier + RewardTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier MiniBlocksCalled func() storage.Cacher MetaBlocksCalled func() storage.Cacher } @@ -43,6 +44,10 @@ func (phs *PoolsHolderStub) UnsignedTransactions() dataRetriever.ShardedDataCach return phs.UnsignedTransactionsCalled() } +func (phs *PoolsHolderStub) RewardTransactions() dataRetriever.ShardedDataCacherNotifier { + return phs.RewardTransactionsCalled() +} + // IsInterfaceNil returns true if there is no value under the interface func (phs *PoolsHolderStub) IsInterfaceNil() bool { if phs == nil { diff --git a/dataRetriever/mock/storerStub.go b/dataRetriever/mock/storerStub.go index d189606d753..af7d1b3ee16 100644 --- a/dataRetriever/mock/storerStub.go +++ b/dataRetriever/mock/storerStub.go @@ -4,7 +4,6 @@ type StorerStub struct { PutCalled func(key, data []byte) error GetCalled func(key []byte) ([]byte, error) HasCalled func(key []byte) error - HasOrAddCalled func(key []byte, value []byte) error RemoveCalled func(key []byte) error ClearCacheCalled func() DestroyUnitCalled func() error @@ -22,10 +21,6 @@ func (ss *StorerStub) Has(key []byte) error { return ss.HasCalled(key) } -func (ss *StorerStub) HasOrAdd(key []byte, value []byte) error { - return ss.HasOrAddCalled(key, value) -} - func (ss *StorerStub) Remove(key []byte) error { return ss.RemoveCalled(key) } diff --git a/dataRetriever/requestHandlers/requestHandler.go b/dataRetriever/requestHandlers/requestHandler.go index 690d87a8b6c..76ebbcf5764 100644 --- a/dataRetriever/requestHandlers/requestHandler.go +++ b/dataRetriever/requestHandlers/requestHandler.go @@ -10,13 +10,14 @@ import ( "github.com/ElrondNetwork/elrond-go/sharding" ) -type ResolverRequestHandler struct { +type resolverRequestHandler struct { resolversFinder dataRetriever.ResolversFinder txRequestTopic string scrRequestTopic string + rewardTxRequestTopic string mbRequestTopic string shardHdrRequestTopic string - metaHdrRequestTopic string + metaHdrRequestTopic string isMetaChain bool maxTxsToRequest int } @@ -28,11 +29,12 
@@ func NewShardResolverRequestHandler( finder dataRetriever.ResolversFinder, txRequestTopic string, scrRequestTopic string, + rewardTxRequestTopic string, mbRequestTopic string, shardHdrRequestTopic string, metaHdrRequestTopic string, maxTxsToRequest int, -) (*ResolverRequestHandler, error) { +) (*resolverRequestHandler, error) { if finder == nil || finder.IsInterfaceNil() { return nil, dataRetriever.ErrNilResolverFinder } @@ -42,6 +44,9 @@ func NewShardResolverRequestHandler( if len(scrRequestTopic) == 0 { return nil, dataRetriever.ErrEmptyScrRequestTopic } + if len(rewardTxRequestTopic) == 0 { + return nil, dataRetriever.ErrEmptyRewardTxRequestTopic + } if len(mbRequestTopic) == 0 { return nil, dataRetriever.ErrEmptyMiniBlockRequestTopic } @@ -55,13 +60,14 @@ func NewShardResolverRequestHandler( return nil, dataRetriever.ErrInvalidMaxTxRequest } - rrh := &ResolverRequestHandler{ + rrh := &resolverRequestHandler{ resolversFinder: finder, txRequestTopic: txRequestTopic, mbRequestTopic: mbRequestTopic, shardHdrRequestTopic: shardHdrRequestTopic, metaHdrRequestTopic: metaHdrRequestTopic, scrRequestTopic: scrRequestTopic, + rewardTxRequestTopic: rewardTxRequestTopic, isMetaChain: false, maxTxsToRequest: maxTxsToRequest, } @@ -74,7 +80,7 @@ func NewMetaResolverRequestHandler( finder dataRetriever.ResolversFinder, shardHdrRequestTopic string, metaHdrRequestTopic string, -) (*ResolverRequestHandler, error) { +) (*resolverRequestHandler, error) { if finder == nil || finder.IsInterfaceNil() { return nil, dataRetriever.ErrNilResolverFinder } @@ -85,7 +91,7 @@ func NewMetaResolverRequestHandler( return nil, dataRetriever.ErrEmptyMetaHeaderRequestTopic } - rrh := &ResolverRequestHandler{ + rrh := &resolverRequestHandler{ resolversFinder: finder, shardHdrRequestTopic: shardHdrRequestTopic, metaHdrRequestTopic: metaHdrRequestTopic, @@ -96,11 +102,11 @@ func NewMetaResolverRequestHandler( } // RequestTransaction method asks for transactions from the connected peers -func (rrh *ResolverRequestHandler) RequestTransaction(destShardID uint32, txHashes [][]byte) { +func (rrh *resolverRequestHandler) RequestTransaction(destShardID uint32, txHashes [][]byte) { rrh.requestByHashes(destShardID, txHashes, rrh.txRequestTopic) } -func (rrh *ResolverRequestHandler) requestByHashes(destShardID uint32, hashes [][]byte, topic string) { +func (rrh *resolverRequestHandler) requestByHashes(destShardID uint32, hashes [][]byte, topic string) { log.Debug(fmt.Sprintf("Requesting %d transactions from shard %d from network on topic %s...\n", len(hashes), destShardID, topic)) resolver, err := rrh.resolversFinder.CrossShardResolver(topic, destShardID) if err != nil { @@ -132,17 +138,22 @@ func (rrh *ResolverRequestHandler) requestByHashes(destShardID uint32, hashes [] } // RequestUnsignedTransactions method asks for unsigned transactions from the connected peers -func (rrh *ResolverRequestHandler) RequestUnsignedTransactions(destShardID uint32, scrHashes [][]byte) { +func (rrh *resolverRequestHandler) RequestUnsignedTransactions(destShardID uint32, scrHashes [][]byte) { rrh.requestByHashes(destShardID, scrHashes, rrh.scrRequestTopic) } +// RequestRewardTransactions requests for reward transactions from the connected peers +func (rrh *resolverRequestHandler) RequestRewardTransactions(destShardId uint32, rewardTxHashes [][]byte) { + rrh.requestByHashes(destShardId, rewardTxHashes, rrh.rewardTxRequestTopic) +} + // RequestMiniBlock method asks for miniblocks from the connected peers -func (rrh *ResolverRequestHandler) 
RequestMiniBlock(shardId uint32, miniblockHash []byte) { +func (rrh *resolverRequestHandler) RequestMiniBlock(shardId uint32, miniblockHash []byte) { rrh.requestByHash(shardId, miniblockHash, rrh.mbRequestTopic) } // RequestHeader method asks for header from the connected peers -func (rrh *ResolverRequestHandler) RequestHeader(shardId uint32, hash []byte) { +func (rrh *resolverRequestHandler) RequestHeader(shardId uint32, hash []byte) { //TODO: Refactor this class and create specific methods for requesting shard or meta data var topic string if shardId == sharding.MetachainShardId { @@ -154,7 +165,7 @@ func (rrh *ResolverRequestHandler) RequestHeader(shardId uint32, hash []byte) { rrh.requestByHash(shardId, hash, topic) } -func (rrh *ResolverRequestHandler) requestByHash(destShardID uint32, hash []byte, baseTopic string) { +func (rrh *resolverRequestHandler) requestByHash(destShardID uint32, hash []byte, baseTopic string) { log.Debug(fmt.Sprintf("Requesting %s from shard %d with hash %s from network\n", baseTopic, destShardID, core.ToB64(hash))) var resolver dataRetriever.Resolver @@ -178,7 +189,7 @@ func (rrh *ResolverRequestHandler) requestByHash(destShardID uint32, hash []byte } // RequestHeaderByNonce method asks for transactions from the connected peers -func (rrh *ResolverRequestHandler) RequestHeaderByNonce(destShardID uint32, nonce uint64) { +func (rrh *resolverRequestHandler) RequestHeaderByNonce(destShardID uint32, nonce uint64) { var err error var resolver dataRetriever.Resolver var topic string @@ -208,7 +219,7 @@ func (rrh *ResolverRequestHandler) RequestHeaderByNonce(destShardID uint32, nonc } // IsInterfaceNil returns true if there is no value under the interface -func (rrh *ResolverRequestHandler) IsInterfaceNil() bool { +func (rrh *resolverRequestHandler) IsInterfaceNil() bool { if rrh == nil { return true } diff --git a/dataRetriever/requestHandlers/requestHandler_test.go b/dataRetriever/requestHandlers/requestHandler_test.go index 2e5f4b747de..bb840b0cd9f 100644 --- a/dataRetriever/requestHandlers/requestHandler_test.go +++ b/dataRetriever/requestHandlers/requestHandler_test.go @@ -51,7 +51,16 @@ func TestNewMetaResolverRequestHandler(t *testing.T) { func TestNewShardResolverRequestHandlerNilFinder(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(nil, "topic", "topic", "topic", "topic", "topic", 1) + rrh, err := NewShardResolverRequestHandler( + nil, + "topic", + "topic", + "topic", + "topic", + "topic", + "topic", + 1, + ) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrNilResolverFinder, err) @@ -60,7 +69,16 @@ func TestNewShardResolverRequestHandlerNilFinder(t *testing.T) { func TestNewShardResolverRequestHandlerTxTopicEmpty(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "", "topic", "topic", "topic", "topic", 1) + rrh, err := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{}, + "", + "topic", + "topic", + "topic", + "topic", + "topic", + 1, + ) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrEmptyTxRequestTopic, err) @@ -69,7 +87,16 @@ func TestNewShardResolverRequestHandlerTxTopicEmpty(t *testing.T) { func TestNewShardResolverRequestHandlerScrTopicEmpty(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "", "topic", "topic", "topic", 1) + rrh, err := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{}, + "topic", + "", + "topic", + "topic", + "topic", + "topic", + 1, + ) assert.Nil(t, rrh) 
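For reference, a minimal sketch of how a caller is expected to wire the widened constructor these tests exercise after this change; the topic strings, shard id and hash below are placeholders, not values from the repository:

```go
package example

import (
	"github.com/ElrondNetwork/elrond-go/dataRetriever"
	"github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers"
)

// requestRewards sketches the new argument order: the reward transaction topic
// is passed between the smart contract result topic and the miniblock topic.
func requestRewards(finder dataRetriever.ResolversFinder) error {
	handler, err := requestHandlers.NewShardResolverRequestHandler(
		finder,
		"transactions",         // txRequestTopic
		"unsignedTransactions", // scrRequestTopic
		"rewardTransactions",   // rewardTxRequestTopic (new in this change)
		"txBlockBodies",        // mbRequestTopic
		"shardBlocks",          // shardHdrRequestTopic
		"metachainBlocks",      // metaHdrRequestTopic
		100,                    // maxTxsToRequest
	)
	if err != nil {
		// an empty reward topic surfaces here as ErrEmptyRewardTxRequestTopic
		return err
	}

	handler.RequestRewardTransactions(0, [][]byte{[]byte("placeholder reward tx hash")})
	return nil
}
```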
assert.Equal(t, dataRetriever.ErrEmptyScrRequestTopic, err) @@ -78,7 +105,15 @@ func TestNewShardResolverRequestHandlerScrTopicEmpty(t *testing.T) { func TestNewShardResolverRequestHandlerMBTopicEmpty(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "topic", "", "topic", "topic", 1) + rrh, err := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{}, + "topic", + "topic", + "topic", + "", + "topic", + "topic", + 1) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrEmptyMiniBlockRequestTopic, err) @@ -87,7 +122,7 @@ func TestNewShardResolverRequestHandlerMBTopicEmpty(t *testing.T) { func TestNewShardResolverRequestHandlerShardHdrTopicEmpty(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "topic", "topic", "", "topic", 1) + rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "topic", "topic", "topic", "", "topic", 1) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrEmptyShardHeaderRequestTopic, err) @@ -96,7 +131,16 @@ func TestNewShardResolverRequestHandlerShardHdrTopicEmpty(t *testing.T) { func TestNewShardResolverRequestHandlerMetaHdrTopicEmpty(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "topic", "topic", "topic", "", 1) + rrh, err := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{}, + "topic", + "topic", + "topic", + "topic", + "topic", + "", + 1, + ) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrEmptyMetaHeaderRequestTopic, err) @@ -105,7 +149,16 @@ func TestNewShardResolverRequestHandlerMetaHdrTopicEmpty(t *testing.T) { func TestNewShardResolverRequestHandlerMaxTxRequestTooSmall(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "topic", "topic", "topic", "topic", 0) + rrh, err := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{}, + "topic", + "topic", + "topic", + "topic", + "topic", + "topic", + 0, + ) assert.Nil(t, rrh) assert.Equal(t, dataRetriever.ErrInvalidMaxTxRequest, err) @@ -114,7 +167,16 @@ func TestNewShardResolverRequestHandlerMaxTxRequestTooSmall(t *testing.T) { func TestNewShardResolverRequestHandler(t *testing.T) { t.Parallel() - rrh, err := NewShardResolverRequestHandler(&mock.ResolversFinderStub{}, "topic", "topic", "topic", "topic", "topic", 1) + rrh, err := NewShardResolverRequestHandler( + &mock.ResolversFinderStub{}, + "topic", + "topic", + "topic", + "topic", + "topic", + "topic", + 1, + ) assert.Nil(t, err) assert.NotNil(t, rrh) @@ -144,6 +206,7 @@ func TestResolverRequestHandler_RequestTransactionErrorWhenGettingCrossShardReso "topic", "topic", "topic", + "topic", 1, ) @@ -173,6 +236,7 @@ func TestResolverRequestHandler_RequestTransactionWrongResolverShouldNotPanic(t "topic", "topic", "topic", + "topic", 1, ) @@ -201,6 +265,7 @@ func TestResolverRequestHandler_RequestTransactionShouldRequestTransactions(t *t "topic", "topic", "topic", + "topic", 1, ) @@ -245,6 +310,7 @@ func TestResolverRequestHandler_RequestTransactionErrorsOnRequestShouldNotPanic( "topic", "topic", "topic", + "topic", 1, ) @@ -283,6 +349,7 @@ func TestResolverRequestHandler_RequestMiniBlockErrorWhenGettingCrossShardResolv "topic", "topic", "topic", + "topic", 1, ) @@ -317,6 +384,7 @@ func TestResolverRequestHandler_RequestMiniBlockErrorsOnRequestShouldNotPanic(t "topic", "topic", "topic", + "topic", 1, ) @@ -345,6 +413,7 @@ func 
TestResolverRequestHandler_RequestMiniBlockShouldCallRequestOnResolver(t *t "topic", "topic", "topic", + "topic", 1, ) @@ -377,6 +446,7 @@ func TestResolverRequestHandler_RequestHeaderShouldCallRequestOnResolver(t *test "topic", "topic", "topic", + "topic", 1, ) @@ -410,6 +480,7 @@ func TestResolverRequestHandler_RequestHeaderByNonceShardFinderReturnsErrorShoul "topic", "topic", "topic", + "topic", 1, ) @@ -444,6 +515,7 @@ func TestResolverRequestHandler_RequestHeaderByNonceShardFinderReturnsAWrongReso "topic", "topic", "topic", + "topic", 1, ) @@ -478,6 +550,7 @@ func TestResolverRequestHandler_RequestHeaderByNonceShardResolverFailsShouldNotP "topic", "topic", "topic", + "topic", 1, ) @@ -506,6 +579,7 @@ func TestResolverRequestHandler_RequestHeaderByNonceShardShouldRequest(t *testin "topic", "topic", "topic", + "topic", 1, ) @@ -564,6 +638,7 @@ func TestResolverRequestHandler_RequestScrErrorWhenGettingCrossShardResolverShou "topic", "topic", "topic", + "topic", 1, ) @@ -593,6 +668,7 @@ func TestResolverRequestHandler_RequestScrWrongResolverShouldNotPanic(t *testing "topic", "topic", "topic", + "topic", 1, ) @@ -621,6 +697,7 @@ func TestResolverRequestHandler_RequestScrShouldRequestScr(t *testing.T) { "topic", "topic", "topic", + "topic", 1, ) @@ -665,6 +742,7 @@ func TestResolverRequestHandler_RequestScrErrorsOnRequestShouldNotPanic(t *testi "topic", "topic", "topic", + "topic", 1, ) diff --git a/dataRetriever/resolvers/genericBlockBodyResolver.go b/dataRetriever/resolvers/genericBlockBodyResolver.go index cacec11910b..5b39941ca35 100644 --- a/dataRetriever/resolvers/genericBlockBodyResolver.go +++ b/dataRetriever/resolvers/genericBlockBodyResolver.go @@ -74,19 +74,18 @@ func (gbbRes *GenericBlockBodyResolver) ProcessReceivedMessage(message p2p.Messa } func (gbbRes *GenericBlockBodyResolver) resolveBlockBodyRequest(rd *dataRetriever.RequestData) ([]byte, error) { - if rd.Value == nil { return nil, dataRetriever.ErrNilValue } - miniBlockHashes, err := gbbRes.miniBlockHashesFromRequestType(rd) + hashes, err := gbbRes.miniBlockHashesFromRequestType(rd) if err != nil { return nil, err } - miniBlocks := gbbRes.GetMiniBlocks(miniBlockHashes) - if miniBlocks == nil { - return nil, dataRetriever.ErrNilMiniBlocks + miniBlocks, _ := gbbRes.GetMiniBlocks(hashes) + if len(miniBlocks) == 0 { + return nil, dataRetriever.ErrEmptyMiniBlockSlice } buff, err := gbbRes.marshalizer.Marshal(miniBlocks) @@ -141,89 +140,73 @@ func (gbbRes *GenericBlockBodyResolver) RequestDataFromHashArray(hashes [][]byte } // GetMiniBlocks method returns a list of deserialized mini blocks from a given hash list either from data pool or from storage -func (gbbRes *GenericBlockBodyResolver) GetMiniBlocks(hashes [][]byte) block.MiniBlockSlice { - miniBlocks := gbbRes.getMiniBlocks(hashes) - if miniBlocks == nil { - return nil +func (gbbRes *GenericBlockBodyResolver) GetMiniBlocks(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + miniBlocks, missingMiniBlocksHashes := gbbRes.GetMiniBlocksFromPool(hashes) + if len(missingMiniBlocksHashes) == 0 { + return miniBlocks, missingMiniBlocksHashes } - mbLength := len(hashes) - expandedMiniBlocks := make(block.MiniBlockSlice, mbLength) + miniBlocksFromStorer, missingMiniBlocksHashes := gbbRes.getMiniBlocksFromStorer(missingMiniBlocksHashes) + miniBlocks = append(miniBlocks, miniBlocksFromStorer...) 
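The net effect of the rewritten resolver above is a two-value contract: callers get back whatever mini blocks could be found (pool first, then storage) together with the hashes that are still missing. A short usage sketch, assuming only the interface shown in this change:

```go
package example

import (
	"github.com/ElrondNetwork/elrond-go/data/block"
	"github.com/ElrondNetwork/elrond-go/dataRetriever"
)

// fetchMiniBlocks gathers what is locally available and re-requests only the
// hashes that were found in neither the pool nor the storer.
func fetchMiniBlocks(resolver dataRetriever.MiniBlocksResolver, hashes [][]byte) (block.MiniBlockSlice, error) {
	found, missing := resolver.GetMiniBlocks(hashes)
	if len(missing) == 0 {
		return found, nil
	}

	// only the still-missing hashes go back on the network
	err := resolver.RequestDataFromHashArray(missing)
	return found, err
}
```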
- for i := 0; i < mbLength; i++ { - mb := &block.MiniBlock{} - err := gbbRes.marshalizer.Unmarshal(mb, miniBlocks[i]) + return miniBlocks, missingMiniBlocksHashes +} - if err != nil { - log.Debug(err.Error()) - gbbRes.miniBlockPool.Remove(hashes[i]) - err = gbbRes.miniBlockStorage.Remove(hashes[i]) - if err != nil { - log.Debug(err.Error()) - } +// GetMiniBlocksFromPool method returns a list of deserialized mini blocks from a given hash list from data pool +func (gbbRes *GenericBlockBodyResolver) GetMiniBlocksFromPool(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + miniBlocks := make(block.MiniBlockSlice, 0) + missingMiniBlocksHashes := make([][]byte, 0) - return nil + for i := 0; i < len(hashes); i++ { + obj, ok := gbbRes.miniBlockPool.Peek(hashes[i]) + if !ok { + missingMiniBlocksHashes = append(missingMiniBlocksHashes, hashes[i]) + continue } - expandedMiniBlocks[i] = mb - } - - return expandedMiniBlocks -} - -// getMiniBlocks method returns a list of serialized mini blocks from a given hash list either from data pool or from storage -func (gbbRes *GenericBlockBodyResolver) getMiniBlocks(hashes [][]byte) [][]byte { - miniBlocks := gbbRes.getMiniBlocksFromCache(hashes) + miniBlock, ok := obj.(*block.MiniBlock) + if !ok { + missingMiniBlocksHashes = append(missingMiniBlocksHashes, hashes[i]) + continue + } - if miniBlocks != nil { - return miniBlocks + miniBlocks = append(miniBlocks, miniBlock) } - return gbbRes.getMiniBlocksFromStorer(hashes) + return miniBlocks, missingMiniBlocksHashes } -// getMiniBlocksFromCache returns a full list of miniblocks from cache. -// If any of the miniblocks is missing the function returns nil -func (gbbRes *GenericBlockBodyResolver) getMiniBlocksFromCache(hashes [][]byte) [][]byte { - miniBlocksLen := len(hashes) - miniBlocks := make([][]byte, miniBlocksLen) - - for i := 0; i < miniBlocksLen; i++ { - cachedMB, _ := gbbRes.miniBlockPool.Peek(hashes[i]) +// getMiniBlocksFromStorer returns a list of mini blocks from storage and a list of missing hashes +func (gbbRes *GenericBlockBodyResolver) getMiniBlocksFromStorer(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + miniBlocks := make(block.MiniBlockSlice, 0) + missingMiniBlocksHashes := make([][]byte, 0) - if cachedMB == nil { - return nil - } - - buff, err := gbbRes.marshalizer.Marshal(cachedMB) + for i := 0; i < len(hashes); i++ { + buff, err := gbbRes.miniBlockStorage.Get(hashes[i]) if err != nil { log.Debug(err.Error()) - return nil + missingMiniBlocksHashes = append(missingMiniBlocksHashes, hashes[i]) + continue } - miniBlocks[i] = buff - } - - return miniBlocks -} - -// getMiniBlocksFromStorer returns a full list of MiniBlocks from the storage unit. 
-// If any MiniBlock is missing or is invalid, it is removed and the function returns nil -func (gbbRes *GenericBlockBodyResolver) getMiniBlocksFromStorer(hashes [][]byte) [][]byte { - miniBlocksLen := len(hashes) - miniBlocks := make([][]byte, miniBlocksLen) - - for i := 0; i < miniBlocksLen; i++ { - buff, err := gbbRes.miniBlockStorage.Get(hashes[i]) + miniBlock := &block.MiniBlock{} + err = gbbRes.marshalizer.Unmarshal(miniBlock, buff) if err != nil { log.Debug(err.Error()) - return nil + gbbRes.miniBlockPool.Remove([]byte(hashes[i])) + err = gbbRes.miniBlockStorage.Remove([]byte(hashes[i])) + if err != nil { + log.Debug(err.Error()) + } + + missingMiniBlocksHashes = append(missingMiniBlocksHashes, hashes[i]) + continue } - miniBlocks[i] = buff + miniBlocks = append(miniBlocks, miniBlock) } - return miniBlocks + return miniBlocks, missingMiniBlocksHashes } // IsInterfaceNil returns true if there is no value under the interface diff --git a/go.sum b/go.sum index 5f960aa4f05..c2336c66e76 100644 --- a/go.sum +++ b/go.sum @@ -108,7 +108,6 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk= github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -176,7 +175,6 @@ github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZl github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= -github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -476,8 +474,6 @@ golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443 h1:IcSOAf4PyMp3U3XbIEj1/xJ2BjNN2jWv7JoyOsMxXUU= golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 h1:Gv7RPwsi3eZ2Fgewe3CBsuOebPwO27PoXzRpJPsvSSM= diff --git a/integrationTests/consensus/consensus_test.go 
b/integrationTests/consensus/consensus_test.go index b0392f57683..e36bb533223 100644 --- a/integrationTests/consensus/consensus_test.go +++ b/integrationTests/consensus/consensus_test.go @@ -28,7 +28,14 @@ func getPkEncoded(pubKey crypto.PublicKey) string { return encodeAddress(pk) } -func initNodesAndTest(numNodes, consensusSize, numInvalid uint32, roundTime uint64, consensusType string) ([]*testNode, p2p.Messenger, *sync.Map) { +func initNodesAndTest( + numNodes, + consensusSize, + numInvalid uint32, + roundTime uint64, + consensusType string, +) ([]*testNode, p2p.Messenger, *sync.Map) { + fmt.Println("Step 1. Setup nodes...") advertiser := createMessengerWithKadDht(context.Background(), "") @@ -43,24 +50,45 @@ func initNodesAndTest(numNodes, consensusSize, numInvalid uint32, roundTime uint getConnectableAddress(advertiser), consensusType, ) - displayAndStartNodes(nodes) + + for _, nodesList := range nodes { + displayAndStartNodes(nodesList) + } if numInvalid < numNodes { for i := uint32(0); i < numInvalid; i++ { - nodes[i].blkProcessor.ProcessBlockCalled = func(blockChain data.ChainHandler, header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { - fmt.Println("process block invalid ", header.GetRound(), header.GetNonce(), getPkEncoded(nodes[i].pk)) + nodes[0][i].blkProcessor.ProcessBlockCalled = func( + blockChain data.ChainHandler, + header data.HeaderHandler, + body data.BodyHandler, + haveTime func() time.Duration, + ) error { + + fmt.Println( + "process block invalid ", + header.GetRound(), + header.GetNonce(), + getPkEncoded(nodes[0][i].pk), + ) return process.ErrBlockHashDoesNotMatch } - nodes[i].blkProcessor.CreateBlockHeaderCalled = func(body data.BodyHandler, round uint64, haveTime func() bool) (handler data.HeaderHandler, e error) { + nodes[0][i].blkProcessor.CreateBlockHeaderCalled = func( + body data.BodyHandler, + round uint64, + haveTime func() bool, + ) (handler data.HeaderHandler, e error) { return nil, process.ErrAccountStateDirty } - nodes[i].blkProcessor.CreateBlockCalled = func(round uint64, haveTime func() bool) (handler data.BodyHandler, e error) { + nodes[0][i].blkProcessor.CreateBlockCalled = func( + round uint64, + haveTime func() bool, + ) (handler data.BodyHandler, e error) { return nil, process.ErrWrongTypeAssertion } } } - return nodes, advertiser, concMap + return nodes[0], advertiser, concMap } func startNodesWithCommitBlock(nodes []*testNode, mutex *sync.Mutex, nonceForRoundMap map[uint64]uint64, totalCalled *int) error { diff --git a/integrationTests/consensus/testInitializer.go b/integrationTests/consensus/testInitializer.go index 83639c2d48b..37720d09687 100644 --- a/integrationTests/consensus/testInitializer.go +++ b/integrationTests/consensus/testInitializer.go @@ -10,6 +10,8 @@ import ( "sync" "time" + "math/big" + "github.com/ElrondNetwork/elrond-go/consensus/round" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/crypto/signing" @@ -73,6 +75,48 @@ type testNode struct { metachainHdrRecv int32 } +type keyPair struct { + sk crypto.PrivateKey + pk crypto.PublicKey +} + +type cryptoParams struct { + keyGen crypto.KeyGenerator + keys map[uint32][]*keyPair + singleSigner crypto.SingleSigner +} + +func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { + validatorsMap := make(map[uint32][]sharding.Validator) + + for shardId, shardNodesPks := range pubKeysMap { + shardValidators := make([]sharding.Validator, 0) + for i := 0; i < len(shardNodesPks); i++ 
{ + address := fmt.Sprintf("addr_%d_%d", shardId, i) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), []byte(address)) + shardValidators = append(shardValidators, v) + } + validatorsMap[shardId] = shardValidators + } + + return validatorsMap +} + +func pubKeysMapFromKeysMap(keyPairMap map[uint32][]*keyPair) map[uint32][]string { + keysMap := make(map[uint32][]string, 0) + + for shardId, pairList := range keyPairMap { + shardKeys := make([]string, len(pairList)) + for i, pair := range pairList { + b, _ := pair.pk.ToByteArray() + shardKeys[i] = string(b) + } + keysMap[shardId] = shardKeys + } + + return keysMap +} + func createMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Messenger { prvKey, _ := ecdsa.GenerateKey(btcec.S256(), r) sk := (*libp2pCrypto.Secp256k1PrivateKey)(prvKey) @@ -148,6 +192,7 @@ func createTestStore() dataRetriever.StorageService { func createTestShardDataPool() dataRetriever.PoolsHolder { txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) @@ -167,6 +212,7 @@ func createTestShardDataPool() dataRetriever.PoolsHolder { dPool, _ := dataPool.NewShardedDataPool( txPool, uTxPool, + rewardsTxPool, hdrPool, hdrNonces, txBlockBody, @@ -191,21 +237,37 @@ func createAccountsDB(marshalizer marshal.Marshalizer) state.AccountsAdapter { return adb } -func initialPrivPubKeys(numConsensus int) ([]crypto.PrivateKey, []crypto.PublicKey, crypto.KeyGenerator) { - privKeys := make([]crypto.PrivateKey, 0) - pubKeys := make([]crypto.PublicKey, 0) - - testSuite := kyber.NewSuitePairingBn256() - testKeyGen := signing.NewKeyGenerator(testSuite) +func createCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards int) *cryptoParams { + suite := kyber.NewSuitePairingBn256() + singleSigner := &singlesig.SchnorrSigner{} + keyGen := signing.NewKeyGenerator(suite) + + keysMap := make(map[uint32][]*keyPair) + keyPairs := make([]*keyPair, nodesPerShard) + for shardId := 0; shardId < nbShards; shardId++ { + for n := 0; n < nodesPerShard; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[uint32(shardId)] = keyPairs + } - for i := 0; i < numConsensus; i++ { - sk, pk := testKeyGen.GeneratePair() + keyPairs = make([]*keyPair, nbMetaNodes) + for n := 0; n < nbMetaNodes; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[sharding.MetachainShardId] = keyPairs - privKeys = append(privKeys, sk) - pubKeys = append(pubKeys, pk) + params := &cryptoParams{ + keys: keysMap, + keyGen: keyGen, + singleSigner: singleSigner, } - return privKeys, pubKeys, testKeyGen + return params } func createHasher(consensusType string) hashing.Hasher { @@ -217,6 +279,7 @@ func createHasher(consensusType string) hashing.Hasher { func createConsensusOnlyNode( shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, shardId uint32, selfId uint32, initialAddr string, @@ -267,13 +330,6 @@ func createConsensusOnlyNode( return nil } blockProcessor.Marshalizer = testMarshalizer - blockTracker := &mock.BlocksTrackerMock{ - UnnotarisedBlocksCalled: 
func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - SetBlockBroadcastRoundCalled: func(nonce uint64, round int64) { - }, - } blockChain := createTestBlockChain() header := &dataBlock.Header{ @@ -348,6 +404,7 @@ func createConsensusOnlyNode( node.WithAccountsAdapter(accntAdapter), node.WithKeyGen(testKeyGen), node.WithShardCoordinator(shardCoordinator), + node.WithNodesCoordinator(nodesCoordinator), node.WithBlockChain(blockChain), node.WithMultiSigner(testMultiSig), node.WithTxSingleSigner(singlesigner), @@ -358,7 +415,6 @@ func createConsensusOnlyNode( node.WithDataStore(createTestStore()), node.WithResolversFinder(resolverFinder), node.WithConsensusType(consensusType), - node.WithBlockTracker(blockTracker), ) if err != nil { @@ -374,41 +430,60 @@ func createNodes( roundTime uint64, serviceID string, consensusType string, -) []*testNode { +) map[uint32][]*testNode { - privKeys, pubKeys, testKeyGen := initialPrivPubKeys(nodesPerShard) - //first node generated will have is pk belonging to firstSkShardId - nodes := make([]*testNode, nodesPerShard) + nodes := make(map[uint32][]*testNode) + cp := createCryptoParams(nodesPerShard, 1, 1) + keysMap := pubKeysMapFromKeysMap(cp.keys) + validatorsMap := genValidatorsFromPubKeys(keysMap) + nodesList := make([]*testNode, nodesPerShard) + + pubKeys := make([]crypto.PublicKey, len(cp.keys[0])) + for idx, keyPairShard := range cp.keys[0] { + pubKeys[idx] = keyPairShard.pk + } for i := 0; i < nodesPerShard; i++ { testNode := &testNode{ shardId: uint32(0), } + kp := cp.keys[0][i] shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(1), uint32(0)) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + consensusSize, + 1, + createHasher(consensusType), + 0, + 1, + validatorsMap, + ) + n, mes, blkProcessor, blkc := createConsensusOnlyNode( shardCoordinator, + nodesCoordinator, testNode.shardId, uint32(i), serviceID, uint32(consensusSize), roundTime, - privKeys[i], + kp.sk, pubKeys, - testKeyGen, + cp.keyGen, consensusType, ) testNode.node = n testNode.node = n - testNode.sk = privKeys[i] + testNode.sk = kp.sk testNode.mesenger = mes - testNode.pk = pubKeys[i] + testNode.pk = kp.pk testNode.blkProcessor = blkProcessor testNode.blkc = blkc - nodes[i] = testNode + nodesList[i] = testNode } + nodes[0] = nodesList return nodes } diff --git a/integrationTests/frontend/wallet/txInterception_test.go b/integrationTests/frontend/wallet/txInterception_test.go index 30fa273531a..18a49ddc2b8 100644 --- a/integrationTests/frontend/wallet/txInterception_test.go +++ b/integrationTests/frontend/wallet/txInterception_test.go @@ -11,34 +11,6 @@ import ( "github.com/stretchr/testify/assert" ) -func TestInterceptedTxFromFrontendGeneratedParamsWithoutData(t *testing.T) { - testInterceptedTxFromFrontendGeneratedParams( - t, - 0, - big.NewInt(10), - "53669be65aac358a6add8e8a8b1251bb994dc1e4a0cc885956f5ecd53396f0d8", - "fe73b8960894941bcf100f7378dba2a6fa2591343413710073c2515817b27dc5", - "f2ae2ad6585f3b44bbbe84f93c3c5ec04a53799d24c04a1dd519666f2cd3dc3d7fbe6c75550b0eb3567fdc0708a8534ae3e5393d0dd9e03c70972f2e716a7007", - 0, - 0, - "", - ) -} - -func TestInterceptedTxFromFrontendGeneratedParams(t *testing.T) { - testInterceptedTxFromFrontendGeneratedParams( - t, - 0, - big.NewInt(10), - "53669be65aac358a6add8e8a8b1251bb994dc1e4a0cc885956f5ecd53396f0d8", - "6c9f95220912dfe4d7be57c26f8f4d1594fee53fc6d958fb9009ed744a681a5a", - 
"e0e5d089dd7d47abfeabf17f4d4ab0022c32b844dfd8124e45c20370d1a1049202c50d8e9c4e8841ce65848b5f0503212e9879f0556706dc6a849d789dfdcb01", - 0, - 0, - "aa@bbbb@cccc", - ) -} - func TestInterceptedTxFromFrontendGeneratedParamsAllParams(t *testing.T) { testInterceptedTxFromFrontendGeneratedParams( t, @@ -106,7 +78,12 @@ func testInterceptedTxFromFrontendGeneratedParams( initialNodeAddr := "nodeAddr" valMinting := big.NewInt(20000) - node := integrationTests.NewTestProcessorNode(maxShards, nodeShardId, txSignPrivKeyShardId, initialNodeAddr) + node := integrationTests.NewTestProcessorNode( + maxShards, + nodeShardId, + txSignPrivKeyShardId, + initialNodeAddr, + ) txHexHash := "" diff --git a/integrationTests/interface.go b/integrationTests/interface.go new file mode 100644 index 00000000000..54768aa04c3 --- /dev/null +++ b/integrationTests/interface.go @@ -0,0 +1,11 @@ +package integrationTests + +import "github.com/ElrondNetwork/elrond-go/process" + +// TestBootstrapper extends the Bootstrapper interface with some functions intended to be used only in tests +// as it simplifies the reproduction of edge cases +type TestBootstrapper interface { + process.Bootstrapper + ForkChoice(revertUsingForkNonce bool) error + SetProbableHighestNonce(nonce uint64) +} diff --git a/integrationTests/mock/blockProcessorMock.go b/integrationTests/mock/blockProcessorMock.go index 6a4ce0ce0b1..65a48103c79 100644 --- a/integrationTests/mock/blockProcessorMock.go +++ b/integrationTests/mock/blockProcessorMock.go @@ -22,6 +22,7 @@ type BlockProcessorMock struct { DecodeBlockBodyCalled func(dta []byte) data.BodyHandler DecodeBlockHeaderCalled func(dta []byte) data.HeaderHandler AddLastNotarizedHdrCalled func(shardId uint32, processedHdr data.HeaderHandler) + SetConsensusDataCalled func(randomness []byte, round uint64, epoch uint32, shardId uint32) } // ProcessBlock mocks pocessing a block @@ -92,6 +93,12 @@ func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } +func (blProcMock BlockProcessorMock) SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) { + if blProcMock.SetConsensusDataCalled != nil { + blProcMock.SetConsensusDataCalled(randomness, round, epoch, shardId) + } +} + // IsInterfaceNil returns true if there is no value under the interface func (blProcMock *BlockProcessorMock) IsInterfaceNil() bool { if blProcMock == nil { diff --git a/integrationTests/mock/blocksTrackerMock.go b/integrationTests/mock/blocksTrackerMock.go deleted file mode 100644 index 864fadad627..00000000000 --- a/integrationTests/mock/blocksTrackerMock.go +++ /dev/null @@ -1,41 +0,0 @@ -package mock - -import ( - "github.com/ElrondNetwork/elrond-go/data" -) - -type BlocksTrackerMock struct { - UnnotarisedBlocksCalled func() []data.HeaderHandler - RemoveNotarisedBlocksCalled func(headerHandler data.HeaderHandler) error - AddBlockCalled func(headerHandler data.HeaderHandler) - SetBlockBroadcastRoundCalled func(nonce uint64, round int64) - BlockBroadcastRoundCalled func(nonce uint64) int64 -} - -func (btm *BlocksTrackerMock) UnnotarisedBlocks() []data.HeaderHandler { - return btm.UnnotarisedBlocksCalled() -} - -func (btm *BlocksTrackerMock) RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error { - return btm.RemoveNotarisedBlocksCalled(headerHandler) -} - -func (btm *BlocksTrackerMock) AddBlock(headerHandler data.HeaderHandler) { - btm.AddBlockCalled(headerHandler) -} - -func (btm *BlocksTrackerMock) SetBlockBroadcastRound(nonce 
uint64, round int64) { - btm.SetBlockBroadcastRoundCalled(nonce, round) -} - -func (btm *BlocksTrackerMock) BlockBroadcastRound(nonce uint64) int64 { - return btm.BlockBroadcastRoundCalled(nonce) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (btm *BlocksTrackerMock) IsInterfaceNil() bool { - if btm == nil { - return true - } - return false -} diff --git a/integrationTests/mock/chronologyValidatorMock.go b/integrationTests/mock/chronologyValidatorMock.go deleted file mode 100644 index ba66f42421a..00000000000 --- a/integrationTests/mock/chronologyValidatorMock.go +++ /dev/null @@ -1,16 +0,0 @@ -package mock - -type ChronologyValidatorMock struct { -} - -func (cvm *ChronologyValidatorMock) ValidateReceivedBlock(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil -} - -// IsInterfaceNil returns true if there is no value under the interface -func (cvm *ChronologyValidatorMock) IsInterfaceNil() bool { - if cvm == nil { - return true - } - return false -} diff --git a/integrationTests/mock/feeHandlerStub.go b/integrationTests/mock/feeHandlerStub.go new file mode 100644 index 00000000000..f6d983310e7 --- /dev/null +++ b/integrationTests/mock/feeHandlerStub.go @@ -0,0 +1,27 @@ +package mock + +type FeeHandlerStub struct { + MinGasPriceCalled func() uint64 + MinGasLimitCalled func() uint64 + MinTxFeeCalled func() uint64 +} + +func (fhs *FeeHandlerStub) MinGasPrice() uint64 { + return fhs.MinGasPriceCalled() +} + +func (fhs *FeeHandlerStub) MinGasLimit() uint64 { + return fhs.MinGasLimitCalled() +} + +func (fhs *FeeHandlerStub) MinTxFee() uint64 { + return fhs.MinTxFeeCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (fhs *FeeHandlerStub) IsInterfaceNil() bool { + if fhs == nil { + return true + } + return false +} diff --git a/integrationTests/mock/hasherSpongeMock.go b/integrationTests/mock/hasherSpongeMock.go new file mode 100644 index 00000000000..2a1c66b9318 --- /dev/null +++ b/integrationTests/mock/hasherSpongeMock.go @@ -0,0 +1,33 @@ +package mock + +import ( + "golang.org/x/crypto/blake2b" +) + +var hasherSpongeEmptyHash []byte + +const hashSize = 16 + +// HasherSpongeMock that will be used for testing +type HasherSpongeMock struct { +} + +// Compute will output the blake2b hash of the input string +func (sha HasherSpongeMock) Compute(s string) []byte { + h, _ := blake2b.New(hashSize, nil) + h.Write([]byte(s)) + return h.Sum(nil) +} + +// EmptyHash will return the blake2b hash of the empty string +func (sha HasherSpongeMock) EmptyHash() []byte { + if len(hasherSpongeEmptyHash) == 0 { + hasherSpongeEmptyHash = sha.Compute("") + } + return hasherSpongeEmptyHash +} + +// Size returns the required size in bytes +func (HasherSpongeMock) Size() int { + return hashSize +} diff --git a/integrationTests/mock/intermediateTransactionHandlerMock.go b/integrationTests/mock/intermediateTransactionHandlerMock.go index 2967c465e04..e553b2472dc 100644 --- a/integrationTests/mock/intermediateTransactionHandlerMock.go +++ b/integrationTests/mock/intermediateTransactionHandlerMock.go @@ -51,7 +51,7 @@ func (ith *IntermediateTransactionHandlerMock) SaveCurrentIntermediateTxToStorag func (ith *IntermediateTransactionHandlerMock) CreateBlockStarted() { if ith.CreateBlockStartedCalled != nil { - ith.CreateAllInterMiniBlocksCalled() + ith.CreateBlockStartedCalled() } } diff --git a/integrationTests/mock/keyMock.go b/integrationTests/mock/keyMock.go new file mode 100644 index 00000000000..1b94601ef15 ---
/dev/null +++ b/integrationTests/mock/keyMock.go @@ -0,0 +1,88 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/crypto" +) + +type PublicKeyMock struct { +} + +type PrivateKeyMock struct { +} + +type KeyGenMock struct { +} + +//------- PublicKeyMock + +func (sspk *PublicKeyMock) ToByteArray() ([]byte, error) { + return []byte("pubKey"), nil +} + +func (sspk *PublicKeyMock) Suite() crypto.Suite { + return nil +} + +func (sspk *PublicKeyMock) Point() crypto.Point { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sspk *PublicKeyMock) IsInterfaceNil() bool { + if sspk == nil { + return true + } + return false +} + +//------- PrivateKeyMock + +func (sk *PrivateKeyMock) ToByteArray() ([]byte, error) { + return []byte("privKey"), nil +} + +func (sk *PrivateKeyMock) GeneratePublic() crypto.PublicKey { + return &PublicKeyMock{} +} + +func (sk *PrivateKeyMock) Suite() crypto.Suite { + return nil +} + +func (sk *PrivateKeyMock) Scalar() crypto.Scalar { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sk *PrivateKeyMock) IsInterfaceNil() bool { + if sk == nil { + return true + } + return false +} + +//------KeyGenMock + +func (keyGen *KeyGenMock) GeneratePair() (crypto.PrivateKey, crypto.PublicKey) { + return &PrivateKeyMock{}, &PublicKeyMock{} +} + +func (keyGen *KeyGenMock) PrivateKeyFromByteArray(b []byte) (crypto.PrivateKey, error) { + return &PrivateKeyMock{}, nil +} + +func (keyGen *KeyGenMock) PublicKeyFromByteArray(b []byte) (crypto.PublicKey, error) { + return &PublicKeyMock{}, nil +} + +func (keyGen *KeyGenMock) Suite() crypto.Suite { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (keyGen *KeyGenMock) IsInterfaceNil() bool { + if keyGen == nil { + return true + } + return false +} diff --git a/integrationTests/mock/miniBlocksResolverMock.go b/integrationTests/mock/miniBlocksResolverMock.go index 8b2a5a64518..9dc3364aa95 100644 --- a/integrationTests/mock/miniBlocksResolverMock.go +++ b/integrationTests/mock/miniBlocksResolverMock.go @@ -9,7 +9,8 @@ type MiniBlocksResolverMock struct { RequestDataFromHashCalled func(hash []byte) error RequestDataFromHashArrayCalled func(hashes [][]byte) error ProcessReceivedMessageCalled func(message p2p.MessageP2P) error - GetMiniBlocksCalled func(hashes [][]byte) block.MiniBlockSlice + GetMiniBlocksCalled func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) + GetMiniBlocksFromPoolCalled func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) } func (hrm *MiniBlocksResolverMock) RequestDataFromHash(hash []byte) error { @@ -24,10 +25,14 @@ func (hrm *MiniBlocksResolverMock) ProcessReceivedMessage(message p2p.MessageP2P return hrm.ProcessReceivedMessageCalled(message) } -func (hrm *MiniBlocksResolverMock) GetMiniBlocks(hashes [][]byte) block.MiniBlockSlice { +func (hrm *MiniBlocksResolverMock) GetMiniBlocks(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { return hrm.GetMiniBlocksCalled(hashes) } +func (hrm *MiniBlocksResolverMock) GetMiniBlocksFromPool(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + return hrm.GetMiniBlocksFromPoolCalled(hashes) +} + // IsInterfaceNil returns true if there is no value under the interface func (hrm *MiniBlocksResolverMock) IsInterfaceNil() bool { if hrm == nil { diff --git a/integrationTests/mock/multiSigMock.go b/integrationTests/mock/multiSigMock.go index 77b82efc5fe..8d561394dc4 100644 --- a/integrationTests/mock/multiSigMock.go +++ 
b/integrationTests/mock/multiSigMock.go @@ -40,10 +40,10 @@ func NewMultiSigner(nrConsens uint32) *BelNevMock { multisigner.sigs = make([][]byte, nrConsens) multisigner.pubkeys = make([]string, nrConsens) - multisigner.aggCom = []byte("commitment") - multisigner.commHash = []byte("commitment") - multisigner.commSecret = []byte("commitment") - multisigner.aggSig = []byte("commitment") + multisigner.aggCom = []byte("agg commitment") + multisigner.commHash = []byte("commitment hash") + multisigner.commSecret = []byte("commitment secret") + multisigner.aggSig = []byte("aggregated signature") return multisigner } @@ -92,7 +92,11 @@ func (bnm *BelNevMock) SetAggregatedSig(aggSig []byte) error { // Verify returns nil if the aggregateed signature is verified for the given public keys func (bnm *BelNevMock) Verify(msg []byte, bitmap []byte) error { - return bnm.VerifyMock(msg, bitmap) + if bnm.VerifyMock != nil { + return bnm.VerifyMock(msg, bitmap) + } + + return nil } // CreateCommitment creates a secret commitment and the corresponding public commitment point diff --git a/integrationTests/mock/nodesCoordinatorMock.go b/integrationTests/mock/nodesCoordinatorMock.go new file mode 100644 index 00000000000..d591a93f6e1 --- /dev/null +++ b/integrationTests/mock/nodesCoordinatorMock.go @@ -0,0 +1,99 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type NodesCoordinatorMock struct { + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) ([]sharding.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) +} + +func (ncm *NodesCoordinatorMock) GetAllValidatorsPublicKeys() map[uint32][][]byte { + return nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsIndexes(publicKeys []string) []uint64 { + return nil +} + +func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( + randomness []byte, + round uint64, + shardId uint32, +) (validatorsGroup []sharding.Validator, err error) { + + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomness, round, shardId) + } + + list := []sharding.Validator{} + + return list, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + pubKeys := make([]string, 0) + + for _, v := range validators { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + addresses := make([]string, 0) + for _, v := range validators { + addresses = append(addresses, string(v.Address())) + } + + return addresses, nil +} + +func (ncm *NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { + return nil +} + +func (ncm *NodesCoordinatorMock) 
GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { + panic("implement me") +} + +func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { + panic("implement me") +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ncm *NodesCoordinatorMock) IsInterfaceNil() bool { + if ncm == nil { + return true + } + return false +} diff --git a/integrationTests/mock/poolscleanerMock.go b/integrationTests/mock/poolscleanerMock.go new file mode 100644 index 00000000000..c5b32a5e6c7 --- /dev/null +++ b/integrationTests/mock/poolscleanerMock.go @@ -0,0 +1,27 @@ +package mock + +import "time" + +type TxPoolsCleanerMock struct { + CleanCalled func(duration time.Duration) (bool, error) + NumRemovedTxsCalled func() uint64 +} + +// Clean will check if in pools exits transactions with nonce low that transaction sender account nonce +// and if tx have low nonce will be removed from pools +func (tpc *TxPoolsCleanerMock) Clean(duration time.Duration) (bool, error) { + return false, nil +} + +// NumRemovedTxs will return the number of removed txs from pools +func (tpc *TxPoolsCleanerMock) NumRemovedTxs() uint64 { + return 0 +} + +// IsInterfaceNil returns true if there is no value under the interface +func (tpc *TxPoolsCleanerMock) IsInterfaceNil() bool { + if tpc == nil { + return true + } + return false +} diff --git a/integrationTests/mock/specialAddressHandlerMock.go b/integrationTests/mock/specialAddressHandlerMock.go new file mode 100644 index 00000000000..26f70f560c8 --- /dev/null +++ b/integrationTests/mock/specialAddressHandlerMock.go @@ -0,0 +1,151 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type SpecialAddressHandlerMock struct { + ElrondCommunityAddressCalled func() []byte + LeaderAddressCalled func() []byte + BurnAddressCalled func() []byte + ShardIdForAddressCalled func([]byte) (uint32, error) + AdrConv state.AddressConverter + ShardCoordinator sharding.Coordinator + NodesCoordinator sharding.NodesCoordinator + + shardConsensusData *data.ConsensusRewardData + metaConsensusData []*data.ConsensusRewardData +} + +func NewSpecialAddressHandlerMock( + addrConv state.AddressConverter, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, +) *SpecialAddressHandlerMock { + return &SpecialAddressHandlerMock{ + ElrondCommunityAddressCalled: nil, + LeaderAddressCalled: nil, + BurnAddressCalled: nil, + ShardIdForAddressCalled: nil, + AdrConv: addrConv, + ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, + shardConsensusData: &data.ConsensusRewardData{ + Round: 0, + Epoch: 0, + Addresses: nil, + }, + metaConsensusData: make([]*data.ConsensusRewardData, 0), + } +} + +func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { +} + +func (sh *SpecialAddressHandlerMock) SetShardConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) error { + addresses, err := sh.NodesCoordinator.GetValidatorsRewardsAddresses(randomness, round, shardId) + if err != nil { + return err + } + + sh.shardConsensusData = &data.ConsensusRewardData{ + Round: round, + Epoch: epoch, + Addresses: addresses, + } + + return nil + +} + +func (sh *SpecialAddressHandlerMock) ConsensusShardRewardData() *data.ConsensusRewardData { + return sh.shardConsensusData +} + +func (sh 
*SpecialAddressHandlerMock) SetMetaConsensusData(randomness []byte, round uint64, epoch uint32) error { + if sh.metaConsensusData == nil { + sh.metaConsensusData = make([]*data.ConsensusRewardData, 0) + } + + addresses, err := sh.NodesCoordinator.GetValidatorsRewardsAddresses(randomness, round, sharding.MetachainShardId) + if err != nil { + return err + } + + sh.metaConsensusData = append(sh.metaConsensusData, &data.ConsensusRewardData{ + Round: round, + Epoch: epoch, + Addresses: addresses, + }) + + return nil + +} + +func (sh *SpecialAddressHandlerMock) ClearMetaConsensusData() { + sh.metaConsensusData = make([]*data.ConsensusRewardData, 0) +} + +func (sh *SpecialAddressHandlerMock) ConsensusMetaRewardData() []*data.ConsensusRewardData { + return sh.metaConsensusData +} + +func (sh *SpecialAddressHandlerMock) BurnAddress() []byte { + if sh.BurnAddressCalled == nil { + return []byte("burn0000000000000000000000000000") + } + + return sh.BurnAddressCalled() +} + +func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { + if sh.ElrondCommunityAddressCalled == nil { + return []byte("elrond00000000000000000000000000") + } + + return sh.ElrondCommunityAddressCalled() +} + +func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { + if sh.LeaderAddressCalled == nil { + if sh.shardConsensusData != nil && len(sh.shardConsensusData.Addresses) > 0 { + return []byte(sh.shardConsensusData.Addresses[0]) + } + return []byte("leader0000000000000000000000000000") + } + + return sh.LeaderAddressCalled() +} + +func (sh *SpecialAddressHandlerMock) Round() uint64 { + if sh.shardConsensusData == nil { + return 0 + } + return sh.shardConsensusData.Round +} + +func (sh *SpecialAddressHandlerMock) Epoch() uint32 { + if sh.shardConsensusData == nil { + return 0 + } + return sh.shardConsensusData.Epoch +} + +func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) (uint32, error) { + convAdr, err := sh.AdrConv.CreateAddressFromPublicKeyBytes(addr) + if err != nil { + return 0, err + } + + return sh.ShardCoordinator.ComputeId(convAdr), nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sh *SpecialAddressHandlerMock) IsInterfaceNil() bool { + if sh == nil { + return true + } + return false +} diff --git a/integrationTests/mock/txTypeHandlerMock.go b/integrationTests/mock/txTypeHandlerMock.go new file mode 100644 index 00000000000..42b6460b56f --- /dev/null +++ b/integrationTests/mock/txTypeHandlerMock.go @@ -0,0 +1,25 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/process" +) + +type TxTypeHandlerMock struct { + ComputeTransactionTypeCalled func(tx data.TransactionHandler) (process.TransactionType, error) +} + +func (th *TxTypeHandlerMock) ComputeTransactionType(tx data.TransactionHandler) (process.TransactionType, error) { + if th.ComputeTransactionTypeCalled == nil { + return process.MoveBalance, nil + } + + return th.ComputeTransactionTypeCalled(tx) +} + +func (th *TxTypeHandlerMock) IsInterfaceNil() bool { + if th == nil { + return true + } + return false +} diff --git a/integrationTests/mock/unsignedTxHandlerMock.go b/integrationTests/mock/unsignedTxHandlerMock.go new file mode 100644 index 00000000000..7e7175bdbff --- /dev/null +++ b/integrationTests/mock/unsignedTxHandlerMock.go @@ -0,0 +1,61 @@ +package mock + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/data" +) + +type UnsignedTxHandlerMock struct { + CleanProcessedUtxsCalled func() + 
ProcessTransactionFeeCalled func(cost *big.Int) + CreateAllUTxsCalled func() []data.TransactionHandler + VerifyCreatedUTxsCalled func() error + AddTxFeeFromBlockCalled func(tx data.TransactionHandler) +} + +func (ut *UnsignedTxHandlerMock) AddRewardTxFromBlock(tx data.TransactionHandler) { + if ut.AddTxFeeFromBlockCalled == nil { + return + } + + ut.AddTxFeeFromBlockCalled(tx) +} + +func (ut *UnsignedTxHandlerMock) CleanProcessedUTxs() { + if ut.CleanProcessedUtxsCalled == nil { + return + } + + ut.CleanProcessedUtxsCalled() +} + +func (ut *UnsignedTxHandlerMock) ProcessTransactionFee(cost *big.Int) { + if ut.ProcessTransactionFeeCalled == nil { + return + } + + ut.ProcessTransactionFeeCalled(cost) +} + +func (ut *UnsignedTxHandlerMock) CreateAllUTxs() []data.TransactionHandler { + if ut.CreateAllUTxsCalled == nil { + return nil + } + return ut.CreateAllUTxsCalled() +} + +func (ut *UnsignedTxHandlerMock) VerifyCreatedUTxs() error { + if ut.VerifyCreatedUTxsCalled == nil { + return nil + } + return ut.VerifyCreatedUTxsCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ut *UnsignedTxHandlerMock) IsInterfaceNil() bool { + if ut == nil { + return true + } + return false +} diff --git a/integrationTests/multiShard/block/executingMiniblocksSc_test.go b/integrationTests/multiShard/block/executingMiniblocksSc_test.go index 826e409ee66..ec6860ce474 100644 --- a/integrationTests/multiShard/block/executingMiniblocksSc_test.go +++ b/integrationTests/multiShard/block/executingMiniblocksSc_test.go @@ -44,6 +44,7 @@ func TestProcessWithScTxsTopUpAndWithdrawOnlyProposers(t *testing.T) { nodeMeta := integrationTests.NewTestProcessorNode(maxShards, sharding.MetachainShardId, 0, advertiserAddr) nodes := []*integrationTests.TestProcessorNode{nodeShard0, nodeShard1, nodeMeta} + idxNodeShard0 := 0 idxNodeShard1 := 1 idxNodeMeta := 2 @@ -127,18 +128,47 @@ func TestProcessWithScTxsJoinAndRewardTwoNodesInShard(t *testing.T) { advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") _ = advertiser.Bootstrap() advertiserAddr := integrationTests.GetConnectableAddress(advertiser) + nodeProposerShard0 := integrationTests.NewTestProcessorNode( + maxShards, + 0, + 0, + advertiserAddr, + ) + nodeValidatorShard0 := integrationTests.NewTestProcessorNode( + maxShards, + 0, + 0, + advertiserAddr, + ) - nodeProposerShard0 := integrationTests.NewTestProcessorNode(maxShards, 0, 0, advertiserAddr) - nodeValidatorShard0 := integrationTests.NewTestProcessorNode(maxShards, 0, 0, advertiserAddr) - - nodeProposerShard1 := integrationTests.NewTestProcessorNode(maxShards, 1, 1, advertiserAddr) + nodeProposerShard1 := integrationTests.NewTestProcessorNode( + maxShards, + 1, + 1, + advertiserAddr, + ) hardCodedSk, _ := hex.DecodeString("5561d28b0d89fa425bbbf9e49a018b5d1e4a462c03d2efce60faf9ddece2af06") hardCodedScResultingAddress, _ := hex.DecodeString("000000000000000001006c560111a94e434413c1cdaafbc3e1348947d1d5b3a1") nodeProposerShard1.LoadTxSignSkBytes(hardCodedSk) - nodeValidatorShard1 := integrationTests.NewTestProcessorNode(maxShards, 1, 1, advertiserAddr) + nodeValidatorShard1 := integrationTests.NewTestProcessorNode( + maxShards, + 1, + 1, + advertiserAddr, + ) - nodeProposerMeta := integrationTests.NewTestProcessorNode(maxShards, sharding.MetachainShardId, 0, advertiserAddr) - nodeValidatorMeta := integrationTests.NewTestProcessorNode(maxShards, sharding.MetachainShardId, 0, advertiserAddr) + nodeProposerMeta := integrationTests.NewTestProcessorNode( + 
maxShards, + sharding.MetachainShardId, + 0, + advertiserAddr, + ) + nodeValidatorMeta := integrationTests.NewTestProcessorNode( + maxShards, + sharding.MetachainShardId, + 0, + advertiserAddr, + ) nodes := []*integrationTests.TestProcessorNode{ nodeProposerShard0, diff --git a/integrationTests/multiShard/block/executingMiniblocks_test.go b/integrationTests/multiShard/block/executingMiniblocks_test.go index 23f71be4fe4..d3ca59d7ec3 100644 --- a/integrationTests/multiShard/block/executingMiniblocks_test.go +++ b/integrationTests/multiShard/block/executingMiniblocks_test.go @@ -57,16 +57,16 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { //sender shard keys, receivers keys sendersPrivateKeys := make([]crypto.PrivateKey, 3) - receiversPrivateKeys := make(map[uint32][]crypto.PrivateKey) + receiversPublicKeys := make(map[uint32][]crypto.PublicKey) for i := 0; i < txToGenerateInEachMiniBlock; i++ { sendersPrivateKeys[i], _, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard) //receivers in same shard with the sender - sk, _, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard) - receiversPrivateKeys[senderShard] = append(receiversPrivateKeys[senderShard], sk) + _, pk, _ := integrationTests.GenerateSkAndPkInShard(generateCoordinator, senderShard) + receiversPublicKeys[senderShard] = append(receiversPublicKeys[senderShard], pk) //receivers in other shards for _, shardId := range recvShards { - sk, _, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, shardId) - receiversPrivateKeys[shardId] = append(receiversPrivateKeys[shardId], sk) + _, pk, _ = integrationTests.GenerateSkAndPkInShard(generateCoordinator, shardId) + receiversPublicKeys[shardId] = append(receiversPublicKeys[shardId], pk) } } @@ -74,7 +74,14 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { integrationTests.CreateMintingForSenders(nodes, senderShard, sendersPrivateKeys, valMinting) fmt.Println("Generating transactions...") - integrationTests.GenerateAndDisseminateTxs(proposerNode, sendersPrivateKeys, receiversPrivateKeys, valToTransferPerTx) + integrationTests.GenerateAndDisseminateTxs( + proposerNode, + sendersPrivateKeys, + receiversPublicKeys, + valToTransferPerTx, + integrationTests.MinTxGasPrice, + integrationTests.MinTxGasLimit, + ) fmt.Println("Delaying for disseminating transactions...") time.Sleep(time.Second * 5) @@ -85,6 +92,10 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) } + gasPricePerTxBigInt := big.NewInt(0).SetUint64(integrationTests.MinTxGasPrice) + gasLimitPerTxBigInt := big.NewInt(0).SetUint64(integrationTests.MinTxGasLimit) + gasValue := big.NewInt(0).Mul(gasPricePerTxBigInt, gasLimitPerTxBigInt) + totalValuePerTx := big.NewInt(0).Add(gasValue, valToTransferPerTx) fmt.Println("Test nodes from proposer shard to have the correct balances...") for _, n := range nodes { isNodeInSenderShard := n.ShardCoordinator.SelfId() == senderShard @@ -94,13 +105,13 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { //test sender balances for _, sk := range sendersPrivateKeys { - valTransferred := big.NewInt(0).Mul(valToTransferPerTx, big.NewInt(int64(len(receiversPrivateKeys)))) + valTransferred := big.NewInt(0).Mul(totalValuePerTx, big.NewInt(int64(len(receiversPublicKeys)))) valRemaining := big.NewInt(0).Sub(valMinting, valTransferred) 
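// each sender paid (valToTransferPerTx + gasPrice*gasLimit) once per receiver shard, so the remaining balance should be valMinting minus valTransferred computed above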
integrationTests.TestPrivateKeyHasBalance(t, n, sk, valRemaining) } //test receiver balances from same shard - for _, sk := range receiversPrivateKeys[proposerNode.ShardCoordinator.SelfId()] { - integrationTests.TestPrivateKeyHasBalance(t, n, sk, valToTransferPerTx) + for _, pk := range receiversPublicKeys[proposerNode.ShardCoordinator.SelfId()] { + integrationTests.TestPublicKeyHasBalance(t, n, pk, valToTransferPerTx) } } @@ -118,8 +129,8 @@ func TestShouldProcessBlocksInMultiShardArchitecture(t *testing.T) { } //test receiver balances from same shard - for _, sk := range receiversPrivateKeys[n.ShardCoordinator.SelfId()] { - integrationTests.TestPrivateKeyHasBalance(t, n, sk, valToTransferPerTx) + for _, pk := range receiversPublicKeys[n.ShardCoordinator.SelfId()] { + integrationTests.TestPublicKeyHasBalance(t, n, pk, valToTransferPerTx) } } } diff --git a/integrationTests/multiShard/block/executingRewardMiniblocks_test.go b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go new file mode 100644 index 00000000000..070228eb130 --- /dev/null +++ b/integrationTests/multiShard/block/executingRewardMiniblocks_test.go @@ -0,0 +1,373 @@ +package block + +import ( + "context" + "errors" + "fmt" + "math/big" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" +) + +func getRewardValue(node *integrationTests.TestProcessorNode) *big.Int { + return node.EconomicsData.RewardsValue() +} + +func getLeaderPercentage(node *integrationTests.TestProcessorNode) float64 { + return node.EconomicsData.LeaderPercentage() +} + +func TestExecuteBlocksWithTransactionsAndCheckRewards(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + nodesPerShard := 4 + nbMetaNodes := 2 + nbShards := 2 + consensusGroupSize := 2 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + seedAddress := integrationTests.GetConnectableAddress(advertiser) + + // create map of shard - testNodeProcessors for metachain and shard chain + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( + nodesPerShard, + nbMetaNodes, + nbShards, + consensusGroupSize, + consensusGroupSize, + seedAddress, + ) + + for _, nodes := range nodesMap { + integrationTests.DisplayAndStartNodes(nodes) + } + + defer func() { + _ = advertiser.Close() + for _, nodes := range nodesMap { + for _, n := range nodes { + _ = n.Node.Stop() + } + } + }() + + gasPrice := uint64(10) + gasLimit := uint64(100) + valToTransfer := big.NewInt(100) + nbTxsPerShard := uint32(100) + mintValue := big.NewInt(1000000) + + generateIntraShardTransactions(nodesMap, nbTxsPerShard, mintValue, valToTransfer, gasPrice, gasLimit) + + round := uint64(1) + nonce := uint64(1) + nbBlocksProduced := 7 + + randomness := generateInitialRandomness(uint32(nbShards)) + var headers map[uint32]data.HeaderHandler + var consensusNodes map[uint32][]*integrationTests.TestProcessorNode + mapRewardsForShardAddresses := make(map[string]uint32) + mapRewardsForMetachainAddresses := make(map[string]uint32) + nbTxsForLeaderAddress := make(map[string]uint32) + + for i := 0; i < nbBlocksProduced; i++ { + _, headers, 
consensusNodes, randomness = integrationTests.AllShardsProposeBlock(round, nonce, randomness, nodesMap) + + for shardId, consensusGroup := range consensusNodes { + shardRewardData := consensusGroup[0].SpecialAddressHandler.ConsensusShardRewardData() + addrRewards := shardRewardData.Addresses + updateExpectedRewards(mapRewardsForShardAddresses, addrRewards) + nbTxs := getTransactionsFromHeaderInShard(t, headers, shardId) + if len(addrRewards) > 0 { + updateNumberTransactionsProposed(t, nbTxsForLeaderAddress, addrRewards[0], nbTxs) + } + } + + updateRewardsForMetachain(mapRewardsForMetachainAddresses, consensusNodes[0][0]) + + indexesProposers := getBlockProposersIndexes(consensusNodes, nodesMap) + integrationTests.VerifyNodesHaveHeaders(t, headers, nodesMap) + integrationTests.SyncAllShardsWithRoundBlock(t, nodesMap, indexesProposers, round) + round++ + nonce++ + } + + time.Sleep(time.Second) + + verifyRewardsForShards(t, nodesMap, mapRewardsForShardAddresses, nbTxsForLeaderAddress, gasPrice, gasLimit) + verifyRewardsForMetachain(t, mapRewardsForMetachainAddresses, nodesMap) +} + +func TestExecuteBlocksWithoutTransactionsAndCheckRewards(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + nodesPerShard := 4 + nbMetaNodes := 2 + nbShards := 2 + consensusGroupSize := 2 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + seedAddress := integrationTests.GetConnectableAddress(advertiser) + + // create map of shard - testNodeProcessors for metachain and shard chain + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( + nodesPerShard, + nbMetaNodes, + nbShards, + consensusGroupSize, + consensusGroupSize, + seedAddress, + ) + + for _, nodes := range nodesMap { + integrationTests.DisplayAndStartNodes(nodes) + } + + defer func() { + _ = advertiser.Close() + for _, nodes := range nodesMap { + for _, n := range nodes { + _ = n.Node.Stop() + } + } + }() + + round := uint64(1) + nonce := uint64(1) + nbBlocksProduced := 7 + + randomness := generateInitialRandomness(uint32(nbShards)) + var headers map[uint32]data.HeaderHandler + var consensusNodes map[uint32][]*integrationTests.TestProcessorNode + mapRewardsForShardAddresses := make(map[string]uint32) + mapRewardsForMetachainAddresses := make(map[string]uint32) + nbTxsForLeaderAddress := make(map[string]uint32) + + for i := 0; i < nbBlocksProduced; i++ { + _, headers, consensusNodes, randomness = integrationTests.AllShardsProposeBlock(round, nonce, randomness, nodesMap) + + for shardId, consensusGroup := range consensusNodes { + if shardId == sharding.MetachainShardId { + continue + } + + shardRewardsData := consensusGroup[0].SpecialAddressHandler.ConsensusShardRewardData() + if shardRewardsData == nil { + shardRewardsData = &data.ConsensusRewardData{} + } + + addrRewards := shardRewardsData.Addresses + updateExpectedRewards(mapRewardsForShardAddresses, addrRewards) + } + + updateRewardsForMetachain(mapRewardsForMetachainAddresses, consensusNodes[0][0]) + + indexesProposers := getBlockProposersIndexes(consensusNodes, nodesMap) + integrationTests.VerifyNodesHaveHeaders(t, headers, nodesMap) + integrationTests.SyncAllShardsWithRoundBlock(t, nodesMap, indexesProposers, round) + round++ + nonce++ + } + + time.Sleep(time.Second) + + verifyRewardsForShards(t, nodesMap, mapRewardsForShardAddresses, nbTxsForLeaderAddress, 0, 0) + verifyRewardsForMetachain(t, mapRewardsForMetachainAddresses, nodesMap) +} + +func generateIntraShardTransactions( + nodesMap 
map[uint32][]*integrationTests.TestProcessorNode, + nbTxsPerShard uint32, + mintValue *big.Int, + valToTransfer *big.Int, + gasPrice uint64, + gasLimit uint64, +) { + sendersPrivateKeys := make(map[uint32][]crypto.PrivateKey) + receiversPublicKeys := make(map[uint32][]crypto.PublicKey) + + for shardId, nodes := range nodesMap { + if shardId == sharding.MetachainShardId { + continue + } + + sendersPrivateKeys[shardId], receiversPublicKeys[shardId] = integrationTests.CreateSendersAndReceiversInShard( + nodes[0], + nbTxsPerShard, + ) + + fmt.Println("Minting sender addresses...") + integrationTests.CreateMintingForSenders( + nodes, + shardId, + sendersPrivateKeys[shardId], + mintValue, + ) + } + + integrationTests.CreateAndSendTransactions( + nodesMap, + sendersPrivateKeys, + receiversPublicKeys, + gasPrice, + gasLimit, + valToTransfer, + ) +} + +func getBlockProposersIndexes( + consensusMap map[uint32][]*integrationTests.TestProcessorNode, + nodesMap map[uint32][]*integrationTests.TestProcessorNode, +) map[uint32]int { + + indexProposer := make(map[uint32]int) + + for sh, testNodeList := range nodesMap { + for k, testNode := range testNodeList { + if consensusMap[sh][0] == testNode { + indexProposer[sh] = k + } + } + } + + return indexProposer +} + +func generateInitialRandomness(nbShards uint32) map[uint32][]byte { + randomness := make(map[uint32][]byte) + + for i := uint32(0); i < nbShards; i++ { + randomness[i] = []byte("root hash") + } + + randomness[sharding.MetachainShardId] = []byte("root hash") + + return randomness +} + +func getTransactionsFromHeaderInShard(t *testing.T, headers map[uint32]data.HeaderHandler, shardId uint32) uint32 { + if shardId == sharding.MetachainShardId { + return 0 + } + + header, ok := headers[shardId] + if !ok { + return 0 + } + + hdr, ok := header.(*block.Header) + if !ok { + assert.Error(t, process.ErrWrongTypeAssertion) + } + + nbTxs := uint32(0) + for _, mb := range hdr.MiniBlockHeaders { + if mb.SenderShardID == shardId && mb.Type == block.TxBlock { + nbTxs += mb.TxCount + } + } + + return nbTxs +} + +func updateExpectedRewards(rewardsForAddress map[string]uint32, addresses []string) { + for i := 0; i < len(addresses); i++ { + if addresses[i] == "" { + continue + } + + rewardsForAddress[addresses[i]]++ + } +} + +func updateNumberTransactionsProposed( + t *testing.T, + transactionsForLeader map[string]uint32, + addressProposer string, + nbTransactions uint32, +) { + if addressProposer == "" { + assert.Error(t, errors.New("invalid address")) + } + + transactionsForLeader[addressProposer] += nbTransactions +} + +func updateRewardsForMetachain(rewardsMap map[string]uint32, consensusNode *integrationTests.TestProcessorNode) { + metaRewardDataSlice := consensusNode.SpecialAddressHandler.ConsensusMetaRewardData() + if len(metaRewardDataSlice) > 0 { + for _, metaRewardData := range metaRewardDataSlice { + for _, addr := range metaRewardData.Addresses { + rewardsMap[addr]++ + } + } + } +} + +func verifyRewardsForMetachain( + t *testing.T, + mapRewardsForMeta map[string]uint32, + nodes map[uint32][]*integrationTests.TestProcessorNode, +) { + rewardValue := getRewardValue(nodes[0][0]) + + for metaAddr, numOfTimesRewarded := range mapRewardsForMeta { + addrContainer, _ := integrationTests.TestAddressConverter.CreateAddressFromPublicKeyBytes([]byte(metaAddr)) + acc, err := nodes[0][0].AccntState.GetExistingAccount(addrContainer) + assert.Nil(t, err) + + expectedBalance := big.NewInt(0).SetUint64(uint64(numOfTimesRewarded)) + expectedBalance.Mul(expectedBalance, 
rewardValue) + assert.Equal(t, expectedBalance, acc.(*state.Account).Balance) + } +} + +func verifyRewardsForShards( + t *testing.T, + nodesMap map[uint32][]*integrationTests.TestProcessorNode, + mapRewardsForAddress map[string]uint32, + nbTxsForLeaderAddress map[string]uint32, + gasPrice uint64, + gasLimit uint64, +) { + rewardValue := getRewardValue(nodesMap[0][0]) + feePerTxForLeader := float64(gasPrice) * float64(gasLimit) * getLeaderPercentage(nodesMap[0][0]) + + for address, nbRewards := range mapRewardsForAddress { + addrContainer, _ := integrationTests.TestAddressConverter.CreateAddressFromPublicKeyBytes([]byte(address)) + shard := nodesMap[0][0].ShardCoordinator.ComputeId(addrContainer) + + for _, shardNode := range nodesMap[shard] { + acc, err := shardNode.AccntState.GetExistingAccount(addrContainer) + assert.Nil(t, err) + + nbProposedTxs := nbTxsForLeaderAddress[address] + expectedBalance := big.NewInt(0).SetUint64(uint64(nbRewards)) + expectedBalance.Mul(expectedBalance, rewardValue) + totalFees := big.NewInt(0).SetUint64(uint64(nbProposedTxs)) + totalFees.Mul(totalFees, big.NewInt(0).SetUint64(uint64(feePerTxForLeader))) + + expectedBalance.Add(expectedBalance, totalFees) + fmt.Println(fmt.Sprintf("checking account %s has balance %d", core.ToB64(acc.AddressContainer().Bytes()), expectedBalance)) + assert.Equal(t, expectedBalance, acc.(*state.Account).Balance) + } + } +} diff --git a/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go b/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go new file mode 100644 index 00000000000..651638218fe --- /dev/null +++ b/integrationTests/multiShard/block/interceptedHeadersSigVerification_test.go @@ -0,0 +1,158 @@ +package block + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" +) + +const broadcastDelay = 2 * time.Second + +func TestInterceptedShardBlockHeaderVerifiedWithCorrectConsensusGroup(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + nodesPerShard := 4 + nbMetaNodes := 4 + nbShards := 1 + consensusGroupSize := 3 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + seedAddress := integrationTests.GetConnectableAddress(advertiser) + + // create map of shard - testNodeProcessors for metachain and shard chain + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( + nodesPerShard, + nbMetaNodes, + nbShards, + consensusGroupSize, + consensusGroupSize, + seedAddress, + ) + + for _, nodes := range nodesMap { + integrationTests.DisplayAndStartNodes(nodes) + } + + defer func() { + _ = advertiser.Close() + for _, nodes := range nodesMap { + for _, n := range nodes { + _ = n.Node.Stop() + } + } + }() + + fmt.Println("Shard node generating header and block body...") + + // one testNodeProcessor from shard proposes block signed by all other nodes in shard consensus + randomness := []byte("random seed") + round := uint64(1) + nonce := uint64(1) + + body, header, _, _ := integrationTests.ProposeBlockWithConsensusSignature(0, nodesMap, round, nonce, randomness) + + nodesMap[0][0].BroadcastBlock(body, header) + + time.Sleep(broadcastDelay) + + headerBytes, _ := integrationTests.TestMarshalizer.Marshal(header) + headerHash := integrationTests.TestHasher.Compute(string(headerBytes)) + + // all nodes in metachain have the block header in pool as 
interceptor validates it + for _, metaNode := range nodesMap[sharding.MetachainShardId] { + v, ok := metaNode.MetaDataPool.ShardHeaders().Get(headerHash) + assert.True(t, ok) + assert.Equal(t, header, v) + } + + // all nodes in shard have the block in pool as interceptor validates it + for _, shardNode := range nodesMap[0] { + v, ok := shardNode.ShardDataPool.Headers().Get(headerHash) + assert.True(t, ok) + assert.Equal(t, header, v) + } +} + +func TestInterceptedMetaBlockVerifiedWithCorrectConsensusGroup(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + nodesPerShard := 4 + nbMetaNodes := 4 + nbShards := 1 + consensusGroupSize := 3 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + + seedAddress := integrationTests.GetConnectableAddress(advertiser) + + // create map of shard - testNodeProcessors for metachain and shard chain + nodesMap := integrationTests.CreateNodesWithNodesCoordinator( + nodesPerShard, + nbMetaNodes, + nbShards, + consensusGroupSize, + consensusGroupSize, + seedAddress, + ) + + for _, nodes := range nodesMap { + integrationTests.DisplayAndStartNodes(nodes) + } + + defer func() { + _ = advertiser.Close() + for _, nodes := range nodesMap { + for _, n := range nodes { + _ = n.Node.Stop() + } + } + }() + + fmt.Println("Metachain node generating header and block body...") + + // one testNodeProcessor from metachain proposes a block signed by all other nodes in the metachain consensus + randomness := []byte("random seed") + round := uint64(1) + nonce := uint64(1) + + body, header, _, _ := integrationTests.ProposeBlockWithConsensusSignature( + sharding.MetachainShardId, + nodesMap, + round, + nonce, + randomness, + ) + + nodesMap[sharding.MetachainShardId][0].BroadcastBlock(body, header) + + time.Sleep(broadcastDelay) + + headerBytes, _ := integrationTests.TestMarshalizer.Marshal(header) + headerHash := integrationTests.TestHasher.Compute(string(headerBytes)) + + // all nodes in metachain have the metablock in pool as the interceptor validates it + for _, metaNode := range nodesMap[sharding.MetachainShardId] { + v, ok := metaNode.MetaDataPool.MetaChainBlocks().Get(headerHash) + assert.True(t, ok) + assert.Equal(t, header, v) + } + + // all nodes in shard have the metablock in pool as the interceptor validates it + for _, shardNode := range nodesMap[0] { + v, ok := shardNode.ShardDataPool.MetaBlocks().Get(headerHash) + assert.True(t, ok) + assert.Equal(t, header, v) + } +} diff --git a/integrationTests/multiShard/smartContract/executingSCCalls_test.go b/integrationTests/multiShard/smartContract/executingSCCalls_test.go index 46d4a8f92d9..48ae037e36c 100644 --- a/integrationTests/multiShard/smartContract/executingSCCalls_test.go +++ b/integrationTests/multiShard/smartContract/executingSCCalls_test.go @@ -22,7 +22,7 @@ var gasPrice = 1 var gasLimit = 1000 var initialValueForInternalVariable = uint64(45) -func createScCallsNodes() (p2p.Messenger, []*testNode) { +func createScCallsNodes() (p2p.Messenger, map[uint32][]*testNode) { advertiser := createMessengerWithKadDht(context.Background(), "") _ = advertiser.Bootstrap() @@ -86,9 +86,10 @@ func haveTime() time.Duration { } // Test within a network of two shards the following situation -// 1. Node in first shard deploys a smart contract -> we also make sure that the resulting smart contract address falls within the same shard -// 2.
The same account within the first shard calls the smart contract, we make sure the smart contract is updated and the gas -// is subtracted from the caller's balance +// 1. Node in first shard deploys a smart contract -> we also make sure that the resulting smart contract address falls +// within the same shard +// 2. The same account within the first shard calls the smart contract, we make sure the smart contract is updated and +// the gas is subtracted from the caller's balance func TestProcessSCCallsInMultiShardArchitecture_FirstShard(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") @@ -102,12 +103,14 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShard(t *testing.T) { advertiser, nodes := createScCallsNodes() defer func() { _ = advertiser.Close() - for _, n := range nodes { - _ = n.node.Stop() + for _, nodeList := range nodes { + for _, n := range nodeList { + _ = n.node.Stop() + } } }() - proposerNodeShard1 := nodes[0] + proposerNodeShard1 := nodes[0][0] // delay for bootstrapping and topic announcement fmt.Println("Delaying for node bootstrap and topic announcement...") @@ -116,7 +119,7 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShard(t *testing.T) { senderAddressBytes := []byte("12345678901234567890123456789012") // Minting sender account - createMintingForSenders(nodes, senderShard, [][]byte{senderAddressBytes}, senderMintingValue) + createMintingForSenders(nodes[0], senderShard, [][]byte{senderAddressBytes}, senderMintingValue) // should deploy smart contract -> we process a block containing only the sc deployment tx deploySmartContract(t, proposerNodeShard1, generalRoundNumber, senderAddressBytes, senderNonce) @@ -181,13 +184,15 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond advertiser, nodes := createScCallsNodes() defer func() { _ = advertiser.Close() - for _, n := range nodes { - _ = n.node.Stop() + for _, nodeList := range nodes { + for _, n := range nodeList { + _ = n.node.Stop() + } } }() - proposerNodeShard1 := nodes[0] - proposerNodeShard2 := nodes[1] + proposerNodeShard1 := nodes[0][0] + proposerNodeShard2 := nodes[1][0] // delay for bootstrapping and topic announcement fmt.Println("Delaying for node bootstrap and topic announcement...") @@ -197,8 +202,8 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond secondShardAddressBytes := []byte("12345678901234567890123456789011") // Minting sender account - createMintingForSenders(nodes, senderShard, [][]byte{senderAddressBytes}, mintingValue) - createMintingForSenders(nodes, receiverShard, [][]byte{secondShardAddressBytes}, mintingValue) + createMintingForSenders(nodes[0], senderShard, [][]byte{senderAddressBytes}, mintingValue) + createMintingForSenders(nodes[1], receiverShard, [][]byte{secondShardAddressBytes}, mintingValue) // should deploy smart contract -> we process a block containing only the sc deployment tx deploySmartContract(t, proposerNodeShard1, generalRoundNumber, senderAddressBytes, senderNonce) @@ -237,10 +242,8 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond // Test again that the gas for calling the smart contract was subtracted from the sender's account acc, _ = proposerNodeShard2.node.GetAccount(hex.EncodeToString(secondShardAddressBytes)) - // TODO: Afrer fees are implemented, from mintingValue we should subtract gasLimit + fees until the other shard executes - // the smart contract and a refund can be made with the remaining value the following rounds - 
assert.Equal(t, mintingValue, acc.Balance) - + afterFee := big.NewInt(0).Sub(mintingValue, big.NewInt(0).SetUint64(contractCallTx.GasLimit*contractCallTx.GasPrice)) + assert.Equal(t, afterFee, acc.Balance) receiverNonce++ assert.Equal(t, receiverNonce, acc.Nonce) @@ -278,13 +281,15 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond advertiser, nodes := createScCallsNodes() defer func() { _ = advertiser.Close() - for _, n := range nodes { - _ = n.node.Stop() + for _, nodeList := range nodes { + for _, n := range nodeList { + _ = n.node.Stop() + } } }() - proposerNodeShardSC := nodes[0] - proposerNodeShardAccount := nodes[1] + proposerNodeShardSC := nodes[0][0] + proposerNodeShardAccount := nodes[1][0] // delay for bootstrapping and topic announcement fmt.Println("Delaying for node bootstrap and topic announcement...") @@ -294,8 +299,8 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond accountShardAddressBytes := []byte("12345678901234567890123456789011") // Minting sender account - createMintingForSenders(nodes, scShard, [][]byte{scAccountAddressBytes}, mintingValue) - createMintingForSenders(nodes, accShard, [][]byte{accountShardAddressBytes}, mintingValue) + createMintingForSenders(nodes[0], scShard, [][]byte{scAccountAddressBytes}, mintingValue) + createMintingForSenders(nodes[1], accShard, [][]byte{accountShardAddressBytes}, mintingValue) // should deploy smart contract -> we process a block containing only the sc deployment tx deploySmartContract(t, proposerNodeShardSC, generalRoundNumber, scAccountAddressBytes, accNonce) @@ -314,7 +319,7 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond scDeploymentAdddress, _ := hex.DecodeString("00000000000000000000cca1490e8cd87c767da41cdab632a7a206c5703c3132") // Update the SC account balance so we can call withdraw function - createMintingForSenders(nodes, scShard, [][]byte{scDeploymentAdddress}, mintingValue) + createMintingForSenders(nodes[0], scShard, [][]byte{scDeploymentAdddress}, mintingValue) // Now that the SC is deployed, we test a call from an account located in the second shard withdrawValue := uint64(100) @@ -328,8 +333,8 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond withdrawValue, ) - // The account shard should process this tx as MoveBalance scNonce++ + // The account shard should process this tx as MoveBalance processAndTestSmartContractCallInSender( t, contractCallTx, @@ -339,6 +344,7 @@ func TestProcessSCCallsInMultiShardArchitecture_FirstShardReceivesCallFromSecond mintingValue, scNonce, ) + generalRoundNumber++ // After second shard processed the transaction, tx should get into the first shard where the SC resides @@ -382,9 +388,8 @@ func processAndTestSmartContractCallInSender( // Test again that the gas for calling the smart contract was subtracted from the sender's account acc, _ := proposerNodeShardAccount.node.GetAccount(hex.EncodeToString(accountShardAddressBytes)) - // TODO: Afrer fees are implemented, from mintingValue we should subtract gasLimit + fees until the other shard executes - // the smart contract and a refund can be made with the remaining value the following rounds - assert.Equal(t, mintingValue, acc.Balance) + afterFee := big.NewInt(0).Sub(mintingValue, big.NewInt(0).SetUint64(contractCallTx.GasLimit*contractCallTx.GasPrice)) + assert.Equal(t, afterFee, acc.Balance) assert.Equal(t, scNonce, acc.Nonce) } @@ -443,7 +448,7 @@ func processAndTestIntermediateResults(t 
*testing.T, proposerNodeShardSC *testNo // - Initial balance + withdraw value - fees // TODO: Fees and gas should be taken into consideration when the fees are implemented - now we have extra money // from the gas returned since the gas was not subtracted in the first place - finalValue := big.NewInt(0).Add(mintingValue, big.NewInt(int64(withdrawValue+uint64(gasLimit-1*gasPrice)))) + finalValue := big.NewInt(0).Add(mintingValue, big.NewInt(int64(withdrawValue-1))) acc, _ := proposerNodeShardAccount.node.GetAccount(hex.EncodeToString(accountShardAddressBytes)) assert.Equal(t, finalValue, acc.Balance) } diff --git a/integrationTests/multiShard/smartContract/testInitilalizer.go b/integrationTests/multiShard/smartContract/testInitializer.go similarity index 72% rename from integrationTests/multiShard/smartContract/testInitilalizer.go rename to integrationTests/multiShard/smartContract/testInitializer.go index 5bc569414f2..c1258d7eff5 100644 --- a/integrationTests/multiShard/smartContract/testInitilalizer.go +++ b/integrationTests/multiShard/smartContract/testInitializer.go @@ -35,6 +35,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever/requestHandlers" "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" "github.com/ElrondNetwork/elrond-go/hashing/sha256" + "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/node" @@ -45,9 +46,11 @@ import ( "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block" "github.com/ElrondNetwork/elrond-go/process/coordinator" + "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/ElrondNetwork/elrond-go/process/factory" metaProcess "github.com/ElrondNetwork/elrond-go/process/factory/metachain" "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/process/transaction" @@ -55,11 +58,13 @@ import ( "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/storage/memorydb" "github.com/ElrondNetwork/elrond-go/storage/storageUnit" - "github.com/ElrondNetwork/elrond-vm-common" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" "github.com/btcsuite/btcd/btcec" libp2pCrypto "github.com/libp2p/go-libp2p-core/crypto" ) +//TODO refactor this package to use TestNodeProcessor infrastructure + var r *rand.Rand var testHasher = sha256.Sha256{} var testMarshalizer = &marshal.JsonMarshalizer{} @@ -70,6 +75,8 @@ var addrConv, _ = addressConverters.NewPlainAddressConverter(32, "0x") var opGas = int64(1) +const maxTxNonceDeltaAllowed = 8000 + func init() { r = rand.New(rand.NewSource(time.Now().UnixNano())) } @@ -102,6 +109,80 @@ type testNode struct { txsRecv int32 } +type keyPair struct { + sk crypto.PrivateKey + pk crypto.PublicKey +} + +type cryptoParams struct { + keyGen crypto.KeyGenerator + keys map[uint32][]*keyPair + singleSigner crypto.SingleSigner +} + +func genValidatorsFromPubKeys(pubKeysMap map[uint32][]string) map[uint32][]sharding.Validator { + validatorsMap := make(map[uint32][]sharding.Validator) + + for shardId, shardNodesPks := range pubKeysMap { + shardValidators := make([]sharding.Validator, 0) + for i := 0; i < len(shardNodesPks); i++ { + v, _ := sharding.NewValidator(big.NewInt(0), 1, 
[]byte(shardNodesPks[i]), []byte(shardNodesPks[i])) + shardValidators = append(shardValidators, v) + } + validatorsMap[shardId] = shardValidators + } + + return validatorsMap +} + +func createCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards int) *cryptoParams { + suite := kyber.NewBlakeSHA256Ed25519() + singleSigner := &singlesig.SchnorrSigner{} + keyGen := signing.NewKeyGenerator(suite) + + keysMap := make(map[uint32][]*keyPair) + keyPairs := make([]*keyPair, nodesPerShard) + for shardId := 0; shardId < nbShards; shardId++ { + for n := 0; n < nodesPerShard; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[uint32(shardId)] = keyPairs + } + + keyPairs = make([]*keyPair, nbMetaNodes) + for n := 0; n < nbMetaNodes; n++ { + kp := &keyPair{} + kp.sk, kp.pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[sharding.MetachainShardId] = keyPairs + + params := &cryptoParams{ + keys: keysMap, + keyGen: keyGen, + singleSigner: singleSigner, + } + + return params +} + +func pubKeysMapFromKeysMap(keyPairMap map[uint32][]*keyPair) map[uint32][]string { + keysMap := make(map[uint32][]string, 0) + + for shardId, pairList := range keyPairMap { + shardKeys := make([]string, len(pairList)) + for i, pair := range pairList { + bytes, _ := pair.pk.ToByteArray() + shardKeys[i] = string(bytes) + } + keysMap[shardId] = shardKeys + } + + return keysMap +} + func createTestShardChain() *blockchain.BlockChain { cfgCache := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} badBlockCache, _ := storageUnit.NewCache(cfgCache.Type, cfgCache.Size, cfgCache.Shards) @@ -132,6 +213,7 @@ func createTestShardStore(numOfShards uint32) dataRetriever.StorageService { store.AddStorer(dataRetriever.PeerChangesUnit, createMemUnit()) store.AddStorer(dataRetriever.BlockHeaderUnit, createMemUnit()) store.AddStorer(dataRetriever.UnsignedTransactionUnit, createMemUnit()) + store.AddStorer(dataRetriever.RewardTransactionUnit, createMemUnit()) store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, createMemUnit()) for i := uint32(0); i < numOfShards; i++ { @@ -145,6 +227,7 @@ func createTestShardStore(numOfShards uint32) dataRetriever.StorageService { func createTestShardDataPool() dataRetriever.PoolsHolder { txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) @@ -164,6 +247,7 @@ func createTestShardDataPool() dataRetriever.PoolsHolder { dPool, _ := dataPool.NewShardedDataPool( txPool, uTxPool, + rewardsTxPool, hdrPool, hdrNonces, txBlockBody, @@ -191,12 +275,14 @@ func createNetNode( dPool dataRetriever.PoolsHolder, accntAdapter state.AccountsAdapter, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, targetShardId uint32, initialAddr string, + params *cryptoParams, + keysIndex int, ) ( *node.Node, p2p.Messenger, - crypto.PrivateKey, dataRetriever.ResolversFinder, process.BlockProcessor, process.TransactionProcessor, @@ -206,41 +292,42 @@ func createNetNode( dataRetriever.StorageService) { messenger := createMessengerWithKadDht(context.Background(), initialAddr) - suite := 
kyber.NewBlakeSHA256Ed25519() - singleSigner := &singlesig.SchnorrSigner{} - keyGen := signing.NewKeyGenerator(suite) - sk, pk := keyGen.GeneratePair() - - for { - pkBytes, _ := pk.ToByteArray() - addr, _ := testAddressConverter.CreateAddressFromPublicKeyBytes(pkBytes) - if shardCoordinator.ComputeId(addr) == targetShardId { - break - } - sk, pk = keyGen.GeneratePair() - } - - pkBuff, _ := pk.ToByteArray() - fmt.Printf("Found pk: %s\n", hex.EncodeToString(pkBuff)) + keyPair := params.keys[targetShardId][keysIndex] + pkBuff, _ := keyPair.pk.ToByteArray() + fmt.Printf("pk: %s\n", hex.EncodeToString(pkBuff)) blkc := createTestShardChain() store := createTestShardStore(shardCoordinator.NumberOfShards()) uint64Converter := uint64ByteSlice.NewBigEndianConverter() dataPacker, _ := partitioning.NewSimpleDataPacker(testMarshalizer) + feeHandler := &mock.FeeHandlerStub{ + MinGasPriceCalled: func() uint64 { + return integrationTests.MinTxGasPrice + }, + MinGasLimitCalled: func() uint64 { + return integrationTests.MinTxGasLimit + }, + MinTxFeeCalled: func() uint64 { + return integrationTests.MinTxGasLimit * integrationTests.MinTxGasPrice + }, + } + interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( accntAdapter, shardCoordinator, + nodesCoordinator, messenger, store, testMarshalizer, testHasher, - keyGen, - singleSigner, + params.keyGen, + params.singleSigner, testMultiSig, dPool, testAddressConverter, - &mock.ChronologyValidatorMock{}, + maxTxNonceDeltaAllowed, + feeHandler, ) interceptorsContainer, err := interceptorContainerFactory.Create() if err != nil { @@ -258,18 +345,44 @@ func createNetNode( ) resolversContainer, _ := resolversContainerFactory.Create() resolversFinder, _ := containers.NewResolversFinder(resolversContainer, shardCoordinator) - requestHandler, _ := requestHandlers.NewShardResolverRequestHandler(resolversFinder, factory.TransactionTopic, factory.UnsignedTransactionTopic, factory.MiniBlocksTopic, factory.HeadersTopic, factory.MetachainBlocksTopic, 100) + requestHandler, _ := requestHandlers.NewShardResolverRequestHandler( + resolversFinder, + factory.TransactionTopic, + factory.UnsignedTransactionTopic, + factory.RewardsTransactionTopic, + factory.MiniBlocksTopic, + factory.HeadersTopic, + factory.MetachainBlocksTopic, + 100, + ) + + economicsData := &economics.EconomicsData{} interimProcFactory, _ := shard.NewIntermediateProcessorsContainerFactory( shardCoordinator, testMarshalizer, testHasher, testAddressConverter, + mock.NewSpecialAddressHandlerMock( + testAddressConverter, + shardCoordinator, + nodesCoordinator, + ), store, + dPool, + economicsData, ) interimProcContainer, _ := interimProcFactory.Create() scForwarder, _ := interimProcContainer.Get(dataBlock.SmartContractResultBlock) - + rewardsInter, _ := interimProcContainer.Get(dataBlock.RewardsBlock) + rewardsHandler, _ := rewardsInter.(process.TransactionFeeHandler) + internalTxProducer, _ := rewardsInter.(process.InternalTransactionProducer) + rewardProcessor, _ := rewardTransaction.NewRewardTxProcessor( + accntAdapter, + addrConv, + shardCoordinator, + rewardsInter, + ) vm, blockChainHook := createVMAndBlockchainHook(accntAdapter) vmContainer := &mock.VMContainerMock{ GetCalled: func(key []byte) (handler vmcommon.VMExecutionHandler, e error) { @@ -286,8 +399,11 @@ func createNetNode( addrConv, shardCoordinator, scForwarder, + rewardsHandler, ) + txTypeHandler, _ := coordinator.NewTxTypeHandler(addrConv, shardCoordinator, accntAdapter) + txProcessor, _ := transaction.NewTxProcessor( accntAdapter, 
testHasher, @@ -295,6 +411,19 @@ func createNetNode( testMarshalizer, shardCoordinator, scProcessor, + rewardsHandler, + txTypeHandler, + &mock.FeeHandlerStub{ + MinGasLimitCalled: func() uint64 { + return 5 + }, + MinTxFeeCalled: func() uint64 { + return 0 + }, + MinGasPriceCalled: func() uint64 { + return 0 + }, + }, ) fact, _ := shard.NewPreProcessorsContainerFactory( @@ -309,6 +438,19 @@ func createNetNode( txProcessor, scProcessor, scProcessor, + rewardProcessor, + internalTxProducer, + &mock.FeeHandlerStub{ + MinGasLimitCalled: func() uint64 { + return 5 + }, + MinTxFeeCalled: func() uint64 { + return 0 + }, + MinGasPriceCalled: func() uint64 { + return 0 + }, + }, ) container, _ := fact.Create() @@ -324,7 +466,7 @@ func createNetNode( genesisBlocks := createGenesisBlocks(shardCoordinator) arguments := block.ArgShardProcessor{ - ArgBaseProcessor: &block.ArgBaseProcessor{ + ArgBaseProcessor: block.ArgBaseProcessor{ Accounts: accntAdapter, ForkDetector: &mock.ForkDetectorMock{ AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte) error { @@ -341,23 +483,20 @@ func createNetNode( Marshalizer: testMarshalizer, Store: store, ShardCoordinator: shardCoordinator, - Uint64Converter: uint64Converter, - StartHeaders: genesisBlocks, - RequestHandler: requestHandler, - Core: &mock.ServiceContainerMock{}, - }, - DataPool: dPool, - BlocksTracker: &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, + NodesCoordinator: nodesCoordinator, + SpecialAddressHandler: mock.NewSpecialAddressHandlerMock( + testAddressConverter, + shardCoordinator, + nodesCoordinator, + ), + Uint64Converter: uint64Converter, + StartHeaders: genesisBlocks, + RequestHandler: requestHandler, + Core: &mock.ServiceContainerMock{}, }, - TxCoordinator: tc, + DataPool: dPool, + TxCoordinator: tc, + TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, } blockProcessor, _ := block.NewShardProcessor(arguments) @@ -371,14 +510,14 @@ func createNetNode( node.WithDataPool(dPool), node.WithAddressConverter(testAddressConverter), node.WithAccountsAdapter(accntAdapter), - node.WithKeyGen(keyGen), + node.WithKeyGen(params.keyGen), node.WithShardCoordinator(shardCoordinator), node.WithBlockChain(blkc), node.WithUint64ByteSliceConverter(uint64Converter), node.WithMultiSigner(testMultiSig), - node.WithSingleSigner(singleSigner), - node.WithTxSignPrivKey(sk), - node.WithTxSignPubKey(pk), + node.WithSingleSigner(params.singleSigner), + node.WithTxSignPrivKey(keyPair.sk), + node.WithTxSignPubKey(keyPair.pk), node.WithInterceptorsContainer(interceptorsContainer), node.WithResolversFinder(resolversFinder), node.WithBlockProcessor(blockProcessor), @@ -390,7 +529,7 @@ func createNetNode( fmt.Println(err.Error()) } - return n, messenger, sk, resolversFinder, blockProcessor, txProcessor, tc, scForwarder, blkc, store + return n, messenger, resolversFinder, blockProcessor, txProcessor, tc, scForwarder, blkc, store } func createMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Messenger { @@ -421,18 +560,20 @@ func getConnectableAddress(mes p2p.Messenger) string { return "" } -func displayAndStartNodes(nodes []*testNode) { - for _, n := range nodes { - skBuff, _ := n.sk.ToByteArray() - pkBuff, _ := n.pk.ToByteArray() 
+func displayAndStartNodes(nodes map[uint32][]*testNode) { + for _, nodeList := range nodes { + for _, n := range nodeList { + skBuff, _ := n.sk.ToByteArray() + pkBuff, _ := n.pk.ToByteArray() - fmt.Printf("Shard ID: %v, sk: %s, pk: %s\n", - n.shardId, - hex.EncodeToString(skBuff), - hex.EncodeToString(pkBuff), - ) - _ = n.node.Start() - _ = n.node.P2PBootstrap() + fmt.Printf("Shard ID: %v, sk: %s, pk: %s\n", + n.shardId, + hex.EncodeToString(skBuff), + hex.EncodeToString(pkBuff), + ) + _ = n.node.Start() + _ = n.node.P2PBootstrap() + } } } @@ -440,14 +581,17 @@ func createNodes( numOfShards int, nodesPerShard int, serviceID string, -) []*testNode { - +) map[uint32][]*testNode { //first node generated will have is pk belonging to firstSkShardId numMetaChainNodes := 1 - nodes := make([]*testNode, int(numOfShards)*nodesPerShard+numMetaChainNodes) + nodes := make(map[uint32][]*testNode) + cp := createCryptoParams(nodesPerShard, numMetaChainNodes, numOfShards) + keysMap := pubKeysMapFromKeysMap(cp.keys) + validatorsMap := genValidatorsFromPubKeys(keysMap) - idx := 0 for shardId := 0; shardId < numOfShards; shardId++ { + shardNodes := make([]*testNode, nodesPerShard) + for j := 0; j < nodesPerShard; j++ { testNode := &testNode{ dPool: createTestShardDataPool(), @@ -455,20 +599,33 @@ func createNodes( } shardCoordinator, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), uint32(shardId)) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + testHasher, + uint32(shardId), + uint32(numOfShards), + validatorsMap, + ) + accntAdapter := createAccountsDB() - n, mes, sk, resFinder, blkProcessor, txProcessor, transactionCoordinator, scrForwarder, blkc, store := createNetNode( + n, mes, resFinder, blkProcessor, txProcessor, transactionCoordinator, scrForwarder, blkc, store := createNetNode( testNode.dPool, accntAdapter, shardCoordinator, + nodesCoordinator, testNode.shardId, serviceID, + cp, + j, ) _ = n.CreateShardedStores() + KeyPair := cp.keys[uint32(shardId)][j] testNode.node = n - testNode.sk = sk + testNode.sk = KeyPair.sk testNode.messenger = mes - testNode.pk = sk.GeneratePublic() + testNode.pk = KeyPair.pk testNode.resFinder = resFinder testNode.accntState = accntAdapter testNode.blkProcessor = blkProcessor @@ -504,27 +661,41 @@ func createNodes( testMarshalizer, mes, shardCoordinator, - sk, + KeyPair.sk, &singlesig.SchnorrSigner{}, ) - nodes[idx] = testNode - idx++ + shardNodes[j] = testNode } + + nodes[uint32(shardId)] = shardNodes } - shardCoordinatorMeta, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), sharding.MetachainShardId) - tn := createMetaNetNode( - createTestMetaDataPool(), - createAccountsDB(), - shardCoordinatorMeta, - serviceID, - ) + metaNodes := make([]*testNode, numMetaChainNodes) for i := 0; i < numMetaChainNodes; i++ { - idx := i + int(numOfShards)*nodesPerShard - nodes[idx] = tn + shardCoordinatorMeta, _ := sharding.NewMultiShardCoordinator(uint32(numOfShards), sharding.MetachainShardId) + nodesCoordinator, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + testHasher, + sharding.MetachainShardId, + uint32(numOfShards), + validatorsMap, + ) + + metaNodes[i] = createMetaNetNode( + createTestMetaDataPool(), + createAccountsDB(), + shardCoordinatorMeta, + nodesCoordinator, + serviceID, + cp, + i, + ) } + nodes[sharding.MetachainShardId] = metaNodes + return nodes } @@ -578,18 +749,17 @@ func createMetaNetNode( dPool dataRetriever.MetaPoolsHolder, accntAdapter state.AccountsAdapter, shardCoordinator sharding.Coordinator, + 
nodesCoordinator sharding.NodesCoordinator, initialAddr string, + params *cryptoParams, + keysIndex int, ) *testNode { tn := testNode{} tn.messenger = createMessengerWithKadDht(context.Background(), initialAddr) - suite := kyber.NewBlakeSHA256Ed25519() - singleSigner := &singlesig.SchnorrSigner{} - keyGen := signing.NewKeyGenerator(suite) - sk, pk := keyGen.GeneratePair() - - pkBuff, _ := pk.ToByteArray() + keyPair := params.keys[sharding.MetachainShardId][keysIndex] + pkBuff, _ := keyPair.pk.ToByteArray() fmt.Printf("Found pk: %s\n", hex.EncodeToString(pkBuff)) tn.blkc = createTestMetaChain() @@ -598,13 +768,13 @@ func createMetaNetNode( interceptorContainerFactory, _ := metaProcess.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, tn.messenger, store, testMarshalizer, testHasher, testMultiSig, dPool, - &mock.ChronologyValidatorMock{}, ) interceptorsContainer, err := interceptorContainerFactory.Create() if err != nil { @@ -625,29 +795,39 @@ func createMetaNetNode( requestHandler, _ := requestHandlers.NewMetaResolverRequestHandler(resolvers, factory.ShardHeadersForMetachainTopic, factory.MetachainBlocksTopic) genesisBlocks := createGenesisBlocks(shardCoordinator) - blkProc, _ := block.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accntAdapter, - dPool, - &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte) error { - return nil - }, - GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - ProbableHighestNonceCalled: func() uint64 { - return 0 + + arguments := block.ArgMetaProcessor{ + ArgBaseProcessor: block.ArgBaseProcessor{ + Accounts: accntAdapter, + ForkDetector: &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte) error { + return nil + }, + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + ProbableHighestNonceCalled: func() uint64 { + return 0 + }, }, + Hasher: testHasher, + Marshalizer: testMarshalizer, + Store: store, + ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, + SpecialAddressHandler: mock.NewSpecialAddressHandlerMock( + testAddressConverter, + shardCoordinator, + nodesCoordinator, + ), + Uint64Converter: uint64Converter, + StartHeaders: genesisBlocks, + RequestHandler: requestHandler, + Core: &mock.ServiceContainerMock{}, }, - shardCoordinator, - testHasher, - testMarshalizer, - store, - genesisBlocks, - requestHandler, - uint64Converter, - ) + DataPool: dPool, + } + blkProc, _ := block.NewMetaProcessor(arguments) _ = tn.blkc.SetGenesisHeader(genesisBlocks[sharding.MetachainShardId]) @@ -657,8 +837,8 @@ func createMetaNetNode( testMarshalizer, tn.messenger, shardCoordinator, - sk, - singleSigner, + keyPair.sk, + params.singleSigner, ) n, err := node.NewNode( @@ -668,14 +848,14 @@ func createMetaNetNode( node.WithMetaDataPool(dPool), node.WithAddressConverter(testAddressConverter), node.WithAccountsAdapter(accntAdapter), - node.WithKeyGen(keyGen), + node.WithKeyGen(params.keyGen), node.WithShardCoordinator(shardCoordinator), node.WithBlockChain(tn.blkc), node.WithUint64ByteSliceConverter(uint64Converter), node.WithMultiSigner(testMultiSig), - node.WithSingleSigner(singleSigner), - node.WithPrivKey(sk), - node.WithPubKey(pk), + node.WithSingleSigner(params.singleSigner), + node.WithPrivKey(keyPair.sk), + node.WithPubKey(keyPair.pk), 
node.WithInterceptorsContainer(interceptorsContainer), node.WithResolversFinder(resolvers), node.WithBlockProcessor(tn.blkProcessor), @@ -688,8 +868,8 @@ func createMetaNetNode( } tn.node = n - tn.sk = sk - tn.pk = pk + tn.sk = keyPair.sk + tn.pk = keyPair.pk tn.accntState = accntAdapter tn.shardId = sharding.MetachainShardId diff --git a/integrationTests/node/getAccount_test.go b/integrationTests/node/getAccount_test.go index f190a56760f..4cb51e19e6f 100644 --- a/integrationTests/node/getAccount_test.go +++ b/integrationTests/node/getAccount_test.go @@ -13,7 +13,7 @@ import ( func TestNode_GetAccountAccountDoesNotExistsShouldRetEmpty(t *testing.T) { t.Parallel() - accDB, _, _ := integrationTests.CreateAccountsDB(nil) + accDB, _, _ := integrationTests.CreateAccountsDB(0) n, _ := node.NewNode( node.WithAccountsAdapter(accDB), @@ -32,7 +32,7 @@ func TestNode_GetAccountAccountDoesNotExistsShouldRetEmpty(t *testing.T) { func TestNode_GetAccountAccountExistsShouldReturn(t *testing.T) { t.Parallel() - accDB, _, _ := integrationTests.CreateAccountsDB(nil) + accDB, _, _ := integrationTests.CreateAccountsDB(0) addressHex := integrationTests.CreateRandomHexString(64) addressBytes, _ := hex.DecodeString(addressHex) diff --git a/integrationTests/node/heartbeat_test.go b/integrationTests/node/heartbeat_test.go index c78b3c715a9..883665cb7e5 100644 --- a/integrationTests/node/heartbeat_test.go +++ b/integrationTests/node/heartbeat_test.go @@ -3,6 +3,8 @@ package node import ( "context" "encoding/hex" + "encoding/json" + "errors" "fmt" "testing" "time" @@ -13,6 +15,7 @@ import ( "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/node/heartbeat" + "github.com/ElrondNetwork/elrond-go/node/mock" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" @@ -159,16 +162,37 @@ func createSender(messenger p2p.Messenger, topic string) (*heartbeat.Sender, cry } func createMonitor(maxDurationPeerUnresponsive time.Duration) *heartbeat.Monitor { - suite := kyber.NewBlakeSHA256Ed25519() - signer := &singlesig.SchnorrSigner{} - keyGen := signing.NewKeyGenerator(suite) monitor, _ := heartbeat.NewMonitor( - signer, - keyGen, integrationTests.TestMarshalizer, maxDurationPeerUnresponsive, map[uint32][]string{0: {""}}, + time.Now(), + &mock.MessageHandlerStub{ + CreateHeartbeatFromP2pMessageCalled: func(message p2p.MessageP2P) (*heartbeat.Heartbeat, error) { + var hb heartbeat.Heartbeat + _ = json.Unmarshal(message.Data(), &hb) + return &hb, nil + }, + }, + &mock.HeartbeatStorerStub{ + UpdateGenesisTimeCalled: func(genesisTime time.Time) error { + return nil + }, + LoadHbmiDTOCalled: func(pubKey string) (*heartbeat.HeartbeatDTO, error) { + return nil, errors.New("not found") + }, + LoadKeysCalled: func() ([][]byte, error) { + return nil, nil + }, + SavePubkeyDataCalled: func(pubkey []byte, heartbeat *heartbeat.HeartbeatDTO) error { + return nil + }, + SaveKeysCalled: func(peersSlice [][]byte) error { + return nil + }, + }, + &heartbeat.RealTimer{}, ) return monitor diff --git a/integrationTests/singleShard/block/executingMiniblocksSc_test.go b/integrationTests/singleShard/block/executingMiniblocksSc_test.go index dc06459908c..b8a425f5f6a 100644 --- a/integrationTests/singleShard/block/executingMiniblocksSc_test.go +++ b/integrationTests/singleShard/block/executingMiniblocksSc_test.go @@ -38,7 +38,12 @@ func 
TestShouldProcessWithScTxsJoinAndRewardOneRound(t *testing.T) { nodes := make([]*integrationTests.TestProcessorNode, numOfNodes) for i := 0; i < numOfNodes; i++ { - nodes[i] = integrationTests.NewTestProcessorNode(maxShards, 0, 0, advertiserAddr) + nodes[i] = integrationTests.NewTestProcessorNode( + maxShards, + 0, + 0, + advertiserAddr, + ) } idxProposer := 0 diff --git a/integrationTests/singleShard/block/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr_test.go index e71a1cf9d7c..735edd85e83 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr_test.go @@ -69,7 +69,7 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { } hdr2 := block.Header{ - Nonce: 1, + Nonce: 0, PubKeysBitmap: []byte{255, 0}, Signature: []byte("signature"), PrevHash: []byte("prev hash"), diff --git a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go index ae197e8e0ff..077a19b2516 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedTx_test.go @@ -9,6 +9,7 @@ import ( "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/singlesig" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/process" @@ -52,11 +53,12 @@ func TestNode_RequestInterceptTransactionWithMessenger(t *testing.T) { integrationTests.CreateMintingForSenders([]*integrationTests.TestProcessorNode{nRequester}, 0, []crypto.PrivateKey{nRequester.OwnAccount.SkTxSign}, valMinting) //Step 1. Generate a signed transaction tx := transaction.Transaction{ - Nonce: 0, - Value: big.NewInt(0), - RcvAddr: integrationTests.TestHasher.Compute("receiver"), - SndAddr: buffPk1, - Data: "tx notarized data", + Nonce: 0, + Value: big.NewInt(0), + RcvAddr: integrationTests.TestHasher.Compute("receiver"), + SndAddr: buffPk1, + Data: "tx notarized data", + GasLimit: integrationTests.MinTxGasLimit, } txBuff, _ := integrationTests.TestMarshalizer.Marshal(&tx) @@ -102,3 +104,83 @@ func TestNode_RequestInterceptTransactionWithMessenger(t *testing.T) { assert.Fail(t, "timeout") } } + +func TestNode_RequestInterceptRewardTransactionWithMessenger(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + var nrOfShards uint32 = 1 + var shardID uint32 = 0 + var txSignPrivKeyShardId uint32 = 0 + requesterNodeAddr := "0" + resolverNodeAddr := "1" + + fmt.Println("Requester: ") + nRequester := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, requesterNodeAddr) + + fmt.Println("Resolver:") + nResolver := integrationTests.NewTestProcessorNode(nrOfShards, shardID, txSignPrivKeyShardId, resolverNodeAddr) + _ = nRequester.Node.Start() + _ = nResolver.Node.Start() + defer func() { + _ = nRequester.Node.Stop() + _ = nResolver.Node.Stop() + }() + + //connect messengers together + time.Sleep(time.Second) + err := nRequester.Messenger.ConnectToPeer(integrationTests.GetConnectableAddress(nResolver.Messenger)) + assert.Nil(t, err) + + time.Sleep(time.Second) + + //Step 1. 
Generate a reward Transaction + tx := rewardTx.RewardTx{ + Value: big.NewInt(0), + RcvAddr: integrationTests.TestHasher.Compute("receiver"), + Round: 0, + Epoch: 0, + ShardId: 0, + } + + marshaledTxBuff, _ := integrationTests.TestMarshalizer.Marshal(&tx) + + fmt.Printf("Transaction: %v\n%v\n", tx, string(marshaledTxBuff)) + + chanDone := make(chan bool) + + txHash := integrationTests.TestHasher.Compute(string(marshaledTxBuff)) + + //step 2. wire up a received handler for requester + nRequester.ShardDataPool.RewardTransactions().RegisterHandler(func(key []byte) { + rewardTxStored, _ := nRequester.ShardDataPool.RewardTransactions().ShardDataStore( + process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), + ).Get(key) + + if reflect.DeepEqual(rewardTxStored, &tx) { + chanDone <- true + } + + assert.Equal(t, rewardTxStored, &tx) + assert.Equal(t, txHash, key) + }) + + //Step 3. add the transaction in resolver pool + nResolver.ShardDataPool.RewardTransactions().AddData( + txHash, + &tx, + process.ShardCacherIdentifier(nRequester.ShardCoordinator.SelfId(), nRequester.ShardCoordinator.SelfId()), + ) + + //Step 4. request tx + rewardTxResolver, _ := nRequester.ResolverFinder.IntraShardResolver(factory.RewardsTransactionTopic) + err = rewardTxResolver.RequestDataFromHash(txHash) + assert.Nil(t, err) + + select { + case <-chanDone: + case <-time.After(time.Second * 3): + assert.Fail(t, "timeout") + } +} diff --git a/integrationTests/state/genesisState_test.go b/integrationTests/state/genesisState_test.go index 34c8ba78aba..55760a98e8a 100644 --- a/integrationTests/state/genesisState_test.go +++ b/integrationTests/state/genesisState_test.go @@ -299,7 +299,7 @@ func printTestDebugLines( } func getRootHashByRunningInitialBalances(initialBalances []*sharding.InitialBalance) ([]byte, state.AccountsAdapter) { - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) uniformIndexes := make([]int, len(initialBalances)) for i := 0; i < len(initialBalances); i++ { diff --git a/integrationTests/state/stateExecTransaction_test.go b/integrationTests/state/stateExecTransaction_test.go index 3f2df6ab972..b903b2f5553 100644 --- a/integrationTests/state/stateExecTransaction_test.go +++ b/integrationTests/state/stateExecTransaction_test.go @@ -19,7 +19,7 @@ func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { t.Skip("this is not a short test") } - accnts, _, _ := integrationTests.CreateAccountsDB(nil) + accnts, _, _ := integrationTests.CreateAccountsDB(0) txProcessor := integrationTests.CreateSimpleTxProcessor(accnts) nonce := uint64(6) balance := big.NewInt(10000) @@ -28,12 +28,14 @@ func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { address := integrationTests.CreateAccount(accnts, nonce, balance) hashCreated, _ := accnts.Commit() - //Step 2. create a tx moving 1 from address to address + //Step 2. 
create a tx moving 1 from pubKeyBuff to pubKeyBuff tx := &transaction.Transaction{ - Nonce: nonce, - Value: big.NewInt(1), - SndAddr: address.Bytes(), - RcvAddr: address.Bytes(), + Nonce: nonce, + Value: big.NewInt(1), + GasLimit: 2, + GasPrice: 1, + SndAddr: address.Bytes(), + RcvAddr: address.Bytes(), } err := txProcessor.ProcessTransaction(tx, 0) @@ -42,6 +44,8 @@ func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { hashAfterExec, _ := accnts.Commit() assert.NotEqual(t, hashCreated, hashAfterExec) + balance = balance.Sub(balance, big.NewInt(0).SetUint64(tx.GasPrice*tx.GasLimit)) + accountAfterExec, _ := accnts.GetAccountWithJournal(address) assert.Equal(t, nonce+1, accountAfterExec.(*state.Account).Nonce) assert.Equal(t, balance, accountAfterExec.(*state.Account).Balance) @@ -50,8 +54,9 @@ func TestExecTransaction_SelfTransactionShouldWork(t *testing.T) { func TestExecTransaction_SelfTransactionWithRevertShouldWork(t *testing.T) { t.Parallel() - accnts, _, _ := integrationTests.CreateAccountsDB(nil) + accnts, _, _ := integrationTests.CreateAccountsDB(0) txProcessor := integrationTests.CreateSimpleTxProcessor(accnts) + nonce := uint64(6) balance := big.NewInt(10000) @@ -61,10 +66,12 @@ func TestExecTransaction_SelfTransactionWithRevertShouldWork(t *testing.T) { //Step 2. create a tx moving 1 from pubKeyBuff to pubKeyBuff tx := &transaction.Transaction{ - Nonce: nonce, - Value: big.NewInt(1), - SndAddr: address.Bytes(), - RcvAddr: address.Bytes(), + Nonce: nonce, + Value: big.NewInt(1), + SndAddr: address.Bytes(), + RcvAddr: address.Bytes(), + GasLimit: 2, + GasPrice: 2, } err := txProcessor.ProcessTransaction(tx, 0) @@ -80,7 +87,7 @@ func TestExecTransaction_SelfTransactionWithRevertShouldWork(t *testing.T) { func TestExecTransaction_MoreTransactionsWithRevertShouldWork(t *testing.T) { t.Parallel() - accnts, _, _ := integrationTests.CreateAccountsDB(nil) + accnts, _, _ := integrationTests.CreateAccountsDB(0) nonce := uint64(6) initialBalance := int64(100000) @@ -106,15 +113,20 @@ func testExecTransactionsMoreTxWithRevert( ) { txProcessor := integrationTests.CreateSimpleTxProcessor(accnts) - txToGenerate := 15000 + txToGenerate := 15000 + gasPrice := uint64(2) + gasLimit := uint64(2) + value := uint64(1) //Step 1. 
execute a lot moving transactions from pubKeyBuff to another pubKeyBuff for i := 0; i < txToGenerate; i++ { tx := &transaction.Transaction{ - Nonce: initialNonce + uint64(i), - Value: big.NewInt(1), - SndAddr: sender.Bytes(), - RcvAddr: receiver.Bytes(), + Nonce: initialNonce + uint64(i), + Value: big.NewInt(int64(value)), + GasPrice: gasPrice, + GasLimit: gasLimit, + SndAddr: sender.Bytes(), + RcvAddr: receiver.Bytes(), } err := txProcessor.ProcessTransaction(tx, 0) @@ -129,7 +141,7 @@ func testExecTransactionsMoreTxWithRevert( newAccount, _ := accnts.GetAccountWithJournal(receiver) account, _ := accnts.GetAccountWithJournal(sender) - assert.Equal(t, account.(*state.Account).Balance, big.NewInt(initialBalance-int64(txToGenerate))) + assert.Equal(t, account.(*state.Account).Balance, big.NewInt(initialBalance-int64(uint64(txToGenerate)*(gasPrice*gasLimit+value)))) assert.Equal(t, account.(*state.Account).Nonce, uint64(txToGenerate)+initialNonce) assert.Equal(t, newAccount.(*state.Account).Balance, big.NewInt(int64(txToGenerate))) @@ -161,7 +173,7 @@ func testExecTransactionsMoreTxWithRevert( func TestExecTransaction_MoreTransactionsMoreIterationsWithRevertShouldWork(t *testing.T) { t.Parallel() - accnts, _, _ := integrationTests.CreateAccountsDB(nil) + accnts, _, _ := integrationTests.CreateAccountsDB(0) nonce := uint64(6) initialBalance := int64(100000) diff --git a/integrationTests/state/stateTrie_test.go b/integrationTests/state/stateTrie_test.go index 325799355f5..33cdef8868b 100644 --- a/integrationTests/state/stateTrie_test.go +++ b/integrationTests/state/stateTrie_test.go @@ -152,7 +152,7 @@ func TestAccountsDB_GetJournalizedAccountReturnNotFoundAccntShouldWork(t *testin func TestAccountsDB_GetExistingAccountConcurrentlyShouldWork(t *testing.T) { t.Parallel() - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) wg := sync.WaitGroup{} wg.Add(2000) @@ -283,7 +283,7 @@ func TestAccountsDB_CommitTwoOkAccountsWithRecreationFromStorageShouldWork(t *te //verifies that commit saves the new tries and that can be loaded back t.Parallel() - adb, _, mu := integrationTests.CreateAccountsDB(nil) + adb, _, mu := integrationTests.CreateAccountsDB(0) adr1 := integrationTests.CreateRandomAddress() adr2 := integrationTests.CreateRandomAddress() @@ -348,7 +348,7 @@ func TestAccountsDB_CommitAnEmptyStateShouldWork(t *testing.T) { } }() - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) hash, err := adb.Commit() @@ -418,7 +418,7 @@ func TestAccountsDB_RevertNonceStepByStepAccountDataShouldWork(t *testing.T) { adr2 := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -489,7 +489,7 @@ func TestAccountsDB_RevertBalanceStepByStepAccountDataShouldWork(t *testing.T) { adr2 := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -561,7 +561,7 @@ func TestAccountsDB_RevertCodeStepByStepAccountDataShouldWork(t *testing.T) { adr2 := integrationTests.CreateRandomAddress() //Step 1. 
create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -627,7 +627,7 @@ func TestAccountsDB_RevertDataStepByStepAccountDataShouldWork(t *testing.T) { adr2 := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -702,7 +702,7 @@ func TestAccountsDB_RevertDataStepByStepWithCommitsAccountDataShouldWork(t *test adr2 := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) rootHash, err := adb.RootHash() assert.Nil(t, err) hrEmpty := base64.StdEncoding.EncodeToString(rootHash) @@ -797,7 +797,7 @@ func TestAccountsDB_ExecBalanceTxExecution(t *testing.T) { adrDest := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) acntSrc, err := adb.GetAccountWithJournal(adrSrc) assert.Nil(t, err) @@ -850,7 +850,7 @@ func TestAccountsDB_ExecALotOfBalanceTxOK(t *testing.T) { adrDest := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) acntSrc, err := adb.GetAccountWithJournal(adrSrc) assert.Nil(t, err) @@ -883,7 +883,7 @@ func TestAccountsDB_ExecALotOfBalanceTxOKorNOK(t *testing.T) { adrDest := integrationTests.CreateRandomAddress() //Step 1. create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) acntSrc, err := adb.GetAccountWithJournal(adrSrc) assert.Nil(t, err) @@ -1048,7 +1048,7 @@ func BenchmarkTxExecution(b *testing.B) { adrDest := integrationTests.CreateRandomAddress() //Step 1. 
create accounts objects - adb, _, _ := integrationTests.CreateAccountsDB(nil) + adb, _, _ := integrationTests.CreateAccountsDB(0) acntSrc, err := adb.GetAccountWithJournal(adrSrc) assert.Nil(b, err) diff --git a/integrationTests/sync/basicSync_test.go b/integrationTests/sync/basicSync_test.go index 0f028e49429..16ca199ad72 100644 --- a/integrationTests/sync/basicSync_test.go +++ b/integrationTests/sync/basicSync_test.go @@ -6,14 +6,13 @@ import ( "testing" "time" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) -var stepDelay = time.Second -var delayP2pBootstrap = time.Second * 2 - func TestSyncWorksInShard_EmptyBlocksNoForks(t *testing.T) { if testing.Short() { t.Skip("this is not a short test") @@ -74,16 +73,101 @@ func TestSyncWorksInShard_EmptyBlocksNoForks(t *testing.T) { for i := 0; i < numRoundsToTest; i++ { integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) - time.Sleep(stepDelay) + time.Sleep(stepSync) + + round = integrationTests.IncrementAndPrintRound(round) + updateRound(nodes, round) + nonce++ + } + + time.Sleep(stepSync) + + testAllNodesHaveTheSameBlockHeightInBlockchain(t, nodes) +} + +func TestSyncWorksInShard_EmptyBlocksDoubleSign(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + maxShards := uint32(1) + shardId := uint32(0) + numNodesPerShard := 6 + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + advertiserAddr := integrationTests.GetConnectableAddress(advertiser) + + nodes := make([]*integrationTests.TestProcessorNode, numNodesPerShard) + for i := 0; i < numNodesPerShard; i++ { + nodes[i] = integrationTests.NewTestSyncNode( + maxShards, + shardId, + shardId, + advertiserAddr, + ) + } + + idxProposerShard0 := 0 + idxProposers := []int{idxProposerShard0} + + defer func() { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Messenger.Close() + } + }() + + for _, n := range nodes { + _ = n.Messenger.Bootstrap() + _ = n.StartSync() + } + + fmt.Println("Delaying for nodes p2p bootstrap...") + time.Sleep(delayP2pBootstrap) + + round := uint64(0) + nonce := uint64(0) + round = integrationTests.IncrementAndPrintRound(round) + updateRound(nodes, round) + nonce++ + + numRoundsToTest := 2 + for i := 0; i < numRoundsToTest; i++ { + integrationTests.ProposeBlock(nodes, idxProposers, round, nonce) + + time.Sleep(stepSync) round = integrationTests.IncrementAndPrintRound(round) updateRound(nodes, round) nonce++ } + time.Sleep(stepSync) + + pubKeysVariant1 := []byte{3} + pubKeysVariant2 := []byte{1} + + proposeBlockWithPubKeyBitmap(nodes[idxProposerShard0], round, nonce, pubKeysVariant1) + proposeBlockWithPubKeyBitmap(nodes[1], round, nonce, pubKeysVariant2) + time.Sleep(stepDelay) + round = integrationTests.IncrementAndPrintRound(round) + updateRound(nodes, round) + + stepDelayForkResolving := 4 * stepDelay + time.Sleep(stepDelayForkResolving) + testAllNodesHaveTheSameBlockHeightInBlockchain(t, nodes) + testAllNodesHaveSameLastBlock(t, nodes) +} + +func proposeBlockWithPubKeyBitmap(n *integrationTests.TestProcessorNode, round uint64, nonce uint64, pubKeys []byte) { + body, header, _ := n.ProposeBlock(round, nonce) + header.SetPubKeysBitmap(pubKeys) + n.BroadcastBlock(body, header) + n.CommitBlock(body, header) } func testAllNodesHaveTheSameBlockHeightInBlockchain(t *testing.T, nodes 
[]*integrationTests.TestProcessorNode) { @@ -97,8 +181,15 @@ func testAllNodesHaveTheSameBlockHeightInBlockchain(t *testing.T, nodes []*integ } } -func updateRound(nodes []*integrationTests.TestProcessorNode, round uint64) { +func testAllNodesHaveSameLastBlock(t *testing.T, nodes []*integrationTests.TestProcessorNode) { + mapBlocksByHash := make(map[string]data.HeaderHandler) + for _, n := range nodes { - n.Rounder.IndexField = int64(round) + hdr := n.BlockChain.GetCurrentBlockHeader() + buff, _ := core.CalculateHash(integrationTests.TestMarshalizer, integrationTests.TestHasher, hdr) + + mapBlocksByHash[string(buff)] = hdr } + + assert.Equal(t, 1, len(mapBlocksByHash)) } diff --git a/integrationTests/sync/common.go b/integrationTests/sync/common.go new file mode 100644 index 00000000000..076639237fc --- /dev/null +++ b/integrationTests/sync/common.go @@ -0,0 +1,176 @@ +package sync + +import ( + "context" + "fmt" + "sync/atomic" + "time" + + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +var stepDelay = time.Second +var delayP2pBootstrap = time.Second * 2 +var stepSync = time.Second * 2 + +func setupSyncNodesOneShardAndMeta( + numNodesPerShard int, + numNodesMeta int, +) ([]*integrationTests.TestProcessorNode, p2p.Messenger, []int) { + + maxShards := uint32(1) + shardId := uint32(0) + + advertiser := integrationTests.CreateMessengerWithKadDht(context.Background(), "") + _ = advertiser.Bootstrap() + advertiserAddr := integrationTests.GetConnectableAddress(advertiser) + + nodes := make([]*integrationTests.TestProcessorNode, 0) + for i := 0; i < numNodesPerShard; i++ { + shardNode := integrationTests.NewTestSyncNode( + maxShards, + shardId, + shardId, + advertiserAddr, + ) + nodes = append(nodes, shardNode) + } + idxProposerShard0 := 0 + + for i := 0; i < numNodesMeta; i++ { + metaNode := integrationTests.NewTestSyncNode( + maxShards, + sharding.MetachainShardId, + shardId, + advertiserAddr, + ) + nodes = append(nodes, metaNode) + } + idxProposerMeta := len(nodes) - 1 + + idxProposers := []int{idxProposerShard0, idxProposerMeta} + + return nodes, advertiser, idxProposers +} + +func startSyncingBlocks(nodes []*integrationTests.TestProcessorNode) { + for _, n := range nodes { + _ = n.StartSync() + } + + fmt.Println("Delaying for nodes to start syncing blocks...") + time.Sleep(stepDelay) +} + +func updateRound(nodes []*integrationTests.TestProcessorNode, round uint64) { + for _, n := range nodes { + n.Rounder.IndexField = int64(round) + } +} + +func proposeAndSyncBlocks( + nodes []*integrationTests.TestProcessorNode, + round *uint64, + idxProposers []int, + nonces []*uint64, + numOfRounds int, +) { + + for i := 0; i < numOfRounds; i++ { + crtRound := atomic.LoadUint64(round) + proposeBlocks(nodes, idxProposers, nonces, crtRound) + + time.Sleep(stepSync) + + crtRound = integrationTests.IncrementAndPrintRound(crtRound) + atomic.StoreUint64(round, crtRound) + updateRound(nodes, crtRound) + incrementNonces(nonces) + } + time.Sleep(stepSync) +} + +func incrementNonces(nonces []*uint64) { + for i := 0; i < len(nonces); i++ { + atomic.AddUint64(nonces[i], 1) + } +} + +func proposeBlocks( + nodes []*integrationTests.TestProcessorNode, + idxProposers []int, + nonces []*uint64, + crtRound uint64, +) { + for idx, proposer := range idxProposers { + crtNonce := atomic.LoadUint64(nonces[idx]) + integrationTests.ProposeBlock(nodes, []int{proposer}, 
crtRound, crtNonce) + } +} + +func forkChoiceOneBlock(nodes []*integrationTests.TestProcessorNode, shardId uint32) { + for idx, n := range nodes { + if n.ShardCoordinator.SelfId() != shardId { + continue + } + err := n.Bootstrapper.ForkChoice(false) + if err != nil { + fmt.Println(err) + } + + newNonce := n.BlockChain.GetCurrentBlockHeader().GetNonce() + fmt.Printf("Node's id %d is at block height %d\n", idx, newNonce) + } +} + +func emptyDataPools(nodes []*integrationTests.TestProcessorNode, shardId uint32) { + for _, n := range nodes { + if n.ShardCoordinator.SelfId() != shardId { + continue + } + + emptyNodeDataPool(n) + } +} + +func emptyNodeDataPool(node *integrationTests.TestProcessorNode) { + if node.ShardDataPool != nil { + emptyShardDataPool(node.ShardDataPool) + } + if node.MetaDataPool != nil { + emptyMetaDataPool(node.MetaDataPool) + } +} + +func emptyShardDataPool(sdp dataRetriever.PoolsHolder) { + sdp.HeadersNonces().Clear() + sdp.Headers().Clear() + sdp.UnsignedTransactions().Clear() + sdp.Transactions().Clear() + sdp.MetaBlocks().Clear() + sdp.MiniBlocks().Clear() + sdp.PeerChangesBlocks().Clear() +} + +func emptyMetaDataPool(holder dataRetriever.MetaPoolsHolder) { + holder.HeadersNonces().Clear() + holder.MetaChainBlocks().Clear() + holder.MiniBlockHashes().Clear() + holder.ShardHeaders().Clear() +} + +func resetHighestProbableNonce(nodes []*integrationTests.TestProcessorNode, shardId uint32, targetNonce uint64) { + for _, n := range nodes { + if n.ShardCoordinator.SelfId() != shardId { + continue + } + if n.BlockChain.GetCurrentBlockHeader().GetNonce() != targetNonce { + continue + } + + n.Bootstrapper.SetProbableHighestNonce(targetNonce) + } +} diff --git a/integrationTests/sync/edgeCases_test.go b/integrationTests/sync/edgeCases_test.go new file mode 100644 index 00000000000..42537b2919e --- /dev/null +++ b/integrationTests/sync/edgeCases_test.go @@ -0,0 +1,112 @@ +package sync + +import ( + "sync/atomic" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/integrationTests" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" +) + +// TestSyncMetaNodeIsSyncingReceivedHigherRoundBlockFromShard tests the following scenario: +// 1. Meta and shard 0 are in sync, producing blocks +// 2. At nonce 3, shard 0 makes a rollback and stops producing blocks for 2 rounds, meta keeps producing blocks +// 3. Shard 0 resumes creating blocks starting with nonce 3 +// 4. 
A bootstrapping meta node should be able to pass meta block with nonce 2 +func TestSyncMetaNodeIsSyncingReceivedHigherRoundBlockFromShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + numNodesPerShard := 3 + numNodesMeta := 3 + + nodes, advertiser, idxProposers := setupSyncNodesOneShardAndMeta(numNodesPerShard, numNodesMeta) + idxProposerMeta := idxProposers[1] + defer integrationTests.CloseProcessorNodes(nodes, advertiser) + + integrationTests.StartP2pBootstrapOnProcessorNodes(nodes) + startSyncingBlocks(nodes) + + round := uint64(0) + idxNonceShard := 0 + idxNonceMeta := 1 + nonces := []*uint64{new(uint64), new(uint64)} + + round = integrationTests.IncrementAndPrintRound(round) + updateRound(nodes, round) + incrementNonces(nonces) + + numRoundsBlocksAreProposedCorrectly := 3 + proposeAndSyncBlocks( + nodes, + &round, + idxProposers, + nonces, + numRoundsBlocksAreProposedCorrectly, + ) + + shardIdToRollbackLastBlock := uint32(0) + forkChoiceOneBlock(nodes, shardIdToRollbackLastBlock) + resetHighestProbableNonce(nodes, shardIdToRollbackLastBlock, 2) + emptyDataPools(nodes, shardIdToRollbackLastBlock) + + //revert also the nonce, so the same block nonce will be used when shard will propose the next block + atomic.AddUint64(nonces[idxNonceShard], ^uint64(0)) + + numRoundsBlocksAreProposedOnlyByMeta := 2 + proposeAndSyncBlocks( + nodes, + &round, + []int{idxProposerMeta}, + []*uint64{nonces[idxNonceMeta]}, + numRoundsBlocksAreProposedOnlyByMeta, + ) + + secondNumRoundsBlocksAreProposedCorrectly := 2 + proposeAndSyncBlocks( + nodes, + &round, + idxProposers, + nonces, + secondNumRoundsBlocksAreProposedCorrectly, + ) + + maxShards := uint32(1) + shardId := uint32(0) + advertiserAddr := integrationTests.GetConnectableAddress(advertiser) + syncMetaNode := integrationTests.NewTestSyncNode( + maxShards, + sharding.MetachainShardId, + shardId, + advertiserAddr, + ) + nodes = append(nodes, syncMetaNode) + syncMetaNode.Rounder.IndexField = int64(round) + + syncNodesSlice := []*integrationTests.TestProcessorNode{syncMetaNode} + integrationTests.StartP2pBootstrapOnProcessorNodes(syncNodesSlice) + startSyncingBlocks(syncNodesSlice) + + //after joining the network we must propose a new block on the metachain as to be received by the sync + //node and to start the bootstrapping process + proposeAndSyncBlocks( + nodes, + &round, + []int{idxProposerMeta}, + []*uint64{nonces[idxNonceMeta]}, + 1, + ) + + numOfRoundsToWaitToCatchUp := numRoundsBlocksAreProposedCorrectly + + numRoundsBlocksAreProposedOnlyByMeta + + secondNumRoundsBlocksAreProposedCorrectly + time.Sleep(stepSync * time.Duration(numOfRoundsToWaitToCatchUp)) + updateRound(nodes, round) + + nonceProposerMeta := nodes[idxProposerMeta].BlockChain.GetCurrentBlockHeader().GetNonce() + nonceSyncNode := syncMetaNode.BlockChain.GetCurrentBlockHeader().GetNonce() + assert.Equal(t, nonceProposerMeta, nonceSyncNode) +} diff --git a/integrationTests/testGameHelperFunctions.go b/integrationTests/testGameHelperFunctions.go index dbfdbc72579..b574e7576cf 100644 --- a/integrationTests/testGameHelperFunctions.go +++ b/integrationTests/testGameHelperFunctions.go @@ -25,7 +25,7 @@ func DeployScTx(nodes []*TestProcessorNode, senderIdx int, scCode string) { sndAddr: nodes[senderIdx].OwnAccount.PkTxSignBytes, data: scCode + "@" + hex.EncodeToString(factory.IELEVirtualMachine), gasLimit: 100000, - gasPrice: 0, + gasPrice: MinTxGasPrice, }) nodes[senderIdx].OwnAccount.Nonce++ _, _ = nodes[senderIdx].SendTransaction(txDeploy) @@ -55,7 
+55,7 @@ func PlayerJoinsGame( sndAddr: player.Address.Bytes(), data: fmt.Sprintf("joinGame@%s", round), gasLimit: 5000, - gasPrice: 0, + gasPrice: MinTxGasPrice, }) player.Nonce++ newBalance := big.NewInt(0) @@ -87,7 +87,7 @@ func NodeCallsRewardAndSend( sndAddr: nodes[idxNodeOwner].OwnAccount.PkTxSignBytes, data: fmt.Sprintf("rewardAndSendToWallet@%s@%s@%X", round, hex.EncodeToString(winnerAddress), prize), gasLimit: 30000, - gasPrice: 0, + gasPrice: MinTxGasPrice, }) nodes[idxNodeOwner].OwnAccount.Nonce++ @@ -123,6 +123,7 @@ func NodeDoesWithdraw( sndAddr: nodes[idxNode].OwnAccount.PkTxSignBytes, data: fmt.Sprintf("withdraw@%X", withdrawValue), gasLimit: 5000, + gasPrice: MinTxGasPrice, }) nodes[idxNode].OwnAccount.Nonce++ _, _ = nodes[idxNode].SendTransaction(txScCall) @@ -150,6 +151,7 @@ func NodeDoesTopUp( sndAddr: nodes[idxNode].OwnAccount.PkTxSignBytes, data: fmt.Sprintf("topUp"), gasLimit: 5000, + gasPrice: MinTxGasPrice, }) nodes[idxNode].OwnAccount.Nonce++ _, _ = nodes[idxNode].SendTransaction(txScCall) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 4b0b0597439..423784daf41 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -31,6 +31,7 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" "github.com/ElrondNetwork/elrond-go/display" + "github.com/ElrondNetwork/elrond-go/hashing/sha256" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" "github.com/ElrondNetwork/elrond-go/node" "github.com/ElrondNetwork/elrond-go/p2p" @@ -89,29 +90,31 @@ func CreateMessengerWithKadDht(ctx context.Context, initialAddr string) p2p.Mess // CreateTestShardDataPool creates a test data pool for shard nodes func CreateTestShardDataPool(txPool dataRetriever.ShardedDataCacherNotifier) dataRetriever.PoolsHolder { if txPool == nil { - txPool, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + txPool, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1}) } - uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) - cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache} + uTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1}) + rewardsTxPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 300, Type: storageUnit.LRUCache, Shards: 1}) + cacherCfg := storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache, Shards: 1} hdrPool, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} hdrNoncesCacher, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) hdrNonces, _ := dataPool.NewNonceSyncMapCacher(hdrNoncesCacher, uint64ByteSlice.NewBigEndianConverter()) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} txBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, 
Shards: 1} peerChangeBlockBody, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) - cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache} + cacherCfg = storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: 1} metaBlocks, _ := storageUnit.NewCache(cacherCfg.Type, cacherCfg.Size, cacherCfg.Shards) dPool, _ := dataPool.NewShardedDataPool( txPool, uTxPool, + rewardsTxPool, hdrPool, hdrNonces, txBlockBody, @@ -165,6 +168,7 @@ func CreateShardStore(numOfShards uint32) dataRetriever.StorageService { store.AddStorer(dataRetriever.PeerChangesUnit, CreateMemUnit()) store.AddStorer(dataRetriever.BlockHeaderUnit, CreateMemUnit()) store.AddStorer(dataRetriever.UnsignedTransactionUnit, CreateMemUnit()) + store.AddStorer(dataRetriever.RewardTransactionUnit, CreateMemUnit()) store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, CreateMemUnit()) for i := uint32(0); i < numOfShards; i++ { @@ -189,18 +193,13 @@ func CreateMetaStore(coordinator sharding.Coordinator) dataRetriever.StorageServ } // CreateAccountsDB creates an account state with a valid trie implementation but with a memory storage -func CreateAccountsDB(shardCoordinator sharding.Coordinator) (*state.AccountsDB, data.Trie, storage.Storer) { - - var accountFactory state.AccountFactory - if shardCoordinator == nil { - accountFactory = factory.NewAccountCreator() - } else { - accountFactory, _ = factory.NewAccountFactoryCreator(shardCoordinator) - } - +func CreateAccountsDB(accountType factory.Type) (*state.AccountsDB, data.Trie, storage.Storer) { + hasher := sha256.Sha256{} store := CreateMemUnit() - tr, _ := trie.NewTrie(store, TestMarshalizer, TestHasher) - adb, _ := state.NewAccountsDB(tr, TestHasher, TestMarshalizer, accountFactory) + + tr, _ := trie.NewTrie(store, TestMarshalizer, hasher) + accountFactory, _ := factory.NewAccountFactoryCreator(accountType) + adb, _ := state.NewAccountsDB(tr, sha256.Sha256{}, TestMarshalizer, accountFactory) return adb, tr, store } @@ -368,7 +367,7 @@ func CreateRandomHexString(chars int) string { // GenerateAddressJournalAccountAccountsDB returns an account, the accounts address, and the accounts database func GenerateAddressJournalAccountAccountsDB() (state.AddressContainer, state.AccountHandler, *state.AccountsDB) { adr := CreateRandomAddress() - adb, _, _ := CreateAccountsDB(nil) + adb, _, _ := CreateAccountsDB(factory.UserAccount) account, _ := state.NewAccount(adr, adb) return adr, account, adb @@ -423,7 +422,27 @@ func AdbEmulateBalanceTxExecution(acntSrc, acntDest *state.Account, value *big.I // CreateSimpleTxProcessor returns a transaction processor func CreateSimpleTxProcessor(accnts state.AccountsAdapter) process.TransactionProcessor { shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) - txProcessor, _ := txProc.NewTxProcessor(accnts, TestHasher, TestAddressConverter, TestMarshalizer, shardCoordinator, &mock.SCProcessorMock{}) + txProcessor, _ := txProc.NewTxProcessor( + accnts, + TestHasher, + TestAddressConverter, + TestMarshalizer, + shardCoordinator, + &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + &mock.FeeHandlerStub{ + MinGasPriceCalled: func() uint64 { + return 0 + }, + MinGasLimitCalled: func() uint64 { + return 5 + }, + MinTxFeeCalled: func() uint64 { + return 0 + }, + }, + ) return txProcessor } @@ -493,10 +512,15 @@ func IncrementAndPrintRound(round uint64) uint64 { return round } -// ProposeBlock proposes a block with SC txs for every shard +// ProposeBlock proposes a 
block for every shard func ProposeBlock(nodes []*TestProcessorNode, idxProposers []int, round uint64, nonce uint64) { fmt.Println("All shards propose blocks...") + for idx, n := range nodes { + // set the consensus reward addresses as rewards processor expects at least valid round + // otherwise the produced rewards will not be valid on verification + n.BlockProcessor.SetConsensusData([]byte("randomness"), round, 0, n.ShardCoordinator.SelfId()) + if !IsIntInSlice(idx, idxProposers) { continue } @@ -660,9 +684,9 @@ func CreateNodes( nodes := make([]*TestProcessorNode, numOfShards*nodesPerShard+numMetaChainNodes) idx := 0 - for shardId := 0; shardId < numOfShards; shardId++ { + for shardId := uint32(0); shardId < uint32(numOfShards); shardId++ { for j := 0; j < nodesPerShard; j++ { - n := NewTestProcessorNode(uint32(numOfShards), uint32(shardId), uint32(shardId), serviceID) + n := NewTestProcessorNode(uint32(numOfShards), shardId, shardId, serviceID) nodes[idx] = n idx++ @@ -671,7 +695,7 @@ func CreateNodes( for i := 0; i < numMetaChainNodes; i++ { metaNode := NewTestProcessorNode(uint32(numOfShards), sharding.MetachainShardId, 0, serviceID) - idx := i + numOfShards*nodesPerShard + idx = i + numOfShards*nodesPerShard nodes[idx] = metaNode } @@ -701,27 +725,20 @@ func DisplayAndStartNodes(nodes []*TestProcessorNode) { func GenerateAndDisseminateTxs( n *TestProcessorNode, senders []crypto.PrivateKey, - receiversPrivateKeys map[uint32][]crypto.PrivateKey, + receiversPublicKeysMap map[uint32][]crypto.PublicKey, valToTransfer *big.Int, + gasPrice uint64, + gasLimit uint64, ) { for i := 0; i < len(senders); i++ { senderKey := senders[i] - incrementalNonce := uint64(0) - for _, recvPrivateKeys := range receiversPrivateKeys { - receiverKey := recvPrivateKeys[i] - tx := generateTx( - senderKey, - n.OwnAccount.SingleSigner, - &txArgs{ - nonce: incrementalNonce, - value: valToTransfer, - rcvAddr: skToPk(receiverKey), - sndAddr: skToPk(senderKey), - }, - ) + incrementalNonce := make([]uint64, len(senders)) + for _, shardReceiversPublicKeys := range receiversPublicKeysMap { + receiverPubKey := shardReceiversPublicKeys[i] + tx := generateTransferTx(incrementalNonce[i], senderKey, receiverPubKey, valToTransfer, gasPrice, gasLimit) _, _ = n.SendTransaction(tx) - incrementalNonce++ + incrementalNonce[i]++ } } } @@ -732,8 +749,34 @@ type txArgs struct { rcvAddr []byte sndAddr []byte data string - gasPrice int - gasLimit int + gasPrice uint64 + gasLimit uint64 +} + +func generateTransferTx( + nonce uint64, + senderPrivateKey crypto.PrivateKey, + receiverPublicKey crypto.PublicKey, + valToTransfer *big.Int, + gasPrice uint64, + gasLimit uint64, +) *transaction.Transaction { + + receiverPubKeyBytes, _ := receiverPublicKey.ToByteArray() + tx := transaction.Transaction{ + Nonce: nonce, + Value: valToTransfer, + RcvAddr: receiverPubKeyBytes, + SndAddr: skToPk(senderPrivateKey), + Data: "", + GasLimit: gasLimit, + GasPrice: gasPrice, + } + txBuff, _ := TestMarshalizer.Marshal(&tx) + signer := &singlesig.SchnorrSigner{} + tx.Signature, _ = signer.Sign(senderPrivateKey, txBuff) + + return &tx } func generateTx( @@ -746,8 +789,8 @@ func generateTx( Value: args.value, RcvAddr: args.rcvAddr, SndAddr: args.sndAddr, - GasPrice: uint64(args.gasPrice), - GasLimit: uint64(args.gasLimit), + GasPrice: args.gasPrice, + GasLimit: args.gasLimit, Data: args.data, } txBuff, _ := TestMarshalizer.Marshal(tx) @@ -761,6 +804,14 @@ func skToPk(sk crypto.PrivateKey) []byte { return pkBuff } +// TestPublicKeyHasBalance checks if the 
account corresponding to the given public key has the expected balance +func TestPublicKeyHasBalance(t *testing.T, n *TestProcessorNode, pk crypto.PublicKey, expectedBalance *big.Int) { + pkBuff, _ := pk.ToByteArray() + addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkBuff) + account, _ := n.AccntState.GetExistingAccount(addr) + assert.Equal(t, expectedBalance, account.(*state.Account).Balance) +} + // TestPrivateKeyHasBalance checks if the private key has the expected balance func TestPrivateKeyHasBalance(t *testing.T, n *TestProcessorNode, sk crypto.PrivateKey, expectedBalance *big.Int) { pkBuff, _ := sk.GeneratePublic().ToByteArray() @@ -795,6 +846,11 @@ func GenerateSkAndPkInShard( keyGen := signing.NewKeyGenerator(suite) sk, pk := keyGen.GeneratePair() + if shardId == sharding.MetachainShardId { + // for metachain generate in shard 0 + shardId = 0 + } + for { pkBytes, _ := pk.ToByteArray() addr, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkBytes) @@ -807,6 +863,55 @@ func GenerateSkAndPkInShard( return sk, pk, keyGen } +// CreateSendersAndReceiversInShard creates given number of sender private key and receiver public key pairs, +// with account in same shard as given node +func CreateSendersAndReceiversInShard( + nodeInShard *TestProcessorNode, + nbSenderReceiverPairs uint32, +) ([]crypto.PrivateKey, []crypto.PublicKey) { + shardId := nodeInShard.ShardCoordinator.SelfId() + receiversPublicKeys := make([]crypto.PublicKey, nbSenderReceiverPairs) + sendersPrivateKeys := make([]crypto.PrivateKey, nbSenderReceiverPairs) + + for i := uint32(0); i < nbSenderReceiverPairs; i++ { + sendersPrivateKeys[i], _, _ = GenerateSkAndPkInShard(nodeInShard.ShardCoordinator, shardId) + _, receiversPublicKeys[i], _ = GenerateSkAndPkInShard(nodeInShard.ShardCoordinator, shardId) + } + + return sendersPrivateKeys, receiversPublicKeys +} + +// CreateAndSendTransactions creates and sends transactions between given senders and receivers. 
+func CreateAndSendTransactions( + nodes map[uint32][]*TestProcessorNode, + sendersPrivKeysMap map[uint32][]crypto.PrivateKey, + receiversPubKeysMap map[uint32][]crypto.PublicKey, + gasPricePerTx uint64, + gasLimitPerTx uint64, + valueToTransfer *big.Int, +) { + for shardId := range nodes { + if shardId == sharding.MetachainShardId { + continue + } + + nodeInShard := nodes[shardId][0] + + fmt.Println("Generating transactions...") + GenerateAndDisseminateTxs( + nodeInShard, + sendersPrivKeysMap[shardId], + receiversPubKeysMap, + valueToTransfer, + gasPricePerTx, + gasLimitPerTx, + ) + } + + fmt.Println("Delaying for disseminating transactions...") + time.Sleep(time.Second * 5) +} + // CreateMintingForSenders creates account with balances for every node in a given shard func CreateMintingForSenders( nodes []*TestProcessorNode, @@ -901,7 +1006,6 @@ func getMissingTxsForNode(n *TestProcessorNode, generatedTxHashes [][]byte) [][] for i := 0; i < len(generatedTxHashes); i++ { _, ok := n.ShardDataPool.Transactions().SearchFirstData(generatedTxHashes[i]) if !ok { - //tx is still missing neededTxs = append(neededTxs, generatedTxHashes[i]) } } @@ -918,40 +1022,33 @@ func requestMissingTransactions(n *TestProcessorNode, shardResolver uint32, need } // CreateRequesterDataPool creates a datapool with a mock txPool -func CreateRequesterDataPool( - t *testing.T, - recvTxs map[int]map[string]struct{}, - mutRecvTxs *sync.Mutex, - nodeIndex int, -) dataRetriever.PoolsHolder { +func CreateRequesterDataPool(t *testing.T, recvTxs map[int]map[string]struct{}, mutRecvTxs *sync.Mutex, nodeIndex int) dataRetriever.PoolsHolder { //not allowed to request data from the same shard - return CreateTestShardDataPool( - &mock.ShardedDataStub{ - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - assert.Fail(t, "same-shard requesters should not be queried") - return nil, false - }, - ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { - assert.Fail(t, "same-shard requesters should not be queried") - return nil - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - mutRecvTxs.Lock() - defer mutRecvTxs.Unlock() - - txMap := recvTxs[nodeIndex] - if txMap == nil { - txMap = make(map[string]struct{}) - recvTxs[nodeIndex] = txMap - } + return CreateTestShardDataPool(&mock.ShardedDataStub{ + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + assert.Fail(t, "same-shard requesters should not be queried") + return nil, false + }, + ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { + assert.Fail(t, "same-shard requesters should not be queried") + return nil + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + mutRecvTxs.Lock() + defer mutRecvTxs.Unlock() + + txMap := recvTxs[nodeIndex] + if txMap == nil { + txMap = make(map[string]struct{}) + recvTxs[nodeIndex] = txMap + } - txMap[string(key)] = struct{}{} - }, - RegisterHandlerCalled: func(i func(key []byte)) { - }, + txMap[string(key)] = struct{}{} }, - ) + RegisterHandlerCalled: func(i func(key []byte)) { + }, + }) } // CreateResolversDataPool creates a datapool containing a given number of transactions @@ -991,7 +1088,7 @@ func generateValidTx( _, pkRecv, _ := GenerateSkAndPkInShard(shardCoordinator, receiverShardId) pkRecvBuff, _ := pkRecv.ToByteArray() - accnts, _, _ := CreateAccountsDB(shardCoordinator) + accnts, _, _ := CreateAccountsDB(factory.UserAccount) addrSender, _ := TestAddressConverter.CreateAddressFromPublicKeyBytes(pkSenderBuff) _, _ = 
accnts.GetAccountWithJournal(addrSender) _, _ = accnts.Commit() @@ -1116,3 +1213,93 @@ func WaitForBootstrapAndShowConnected(peers []p2p.Messenger, durationBootstrapin fmt.Printf("Peer %s is connected to %d peers\n", peer.ID().Pretty(), len(peer.ConnectedPeers())) } } + +// PubKeysMapFromKeysMap returns a map of public keys per shard from the key pairs per shard map. +func PubKeysMapFromKeysMap(keyPairMap map[uint32][]*TestKeyPair) map[uint32][]string { + keysMap := make(map[uint32][]string, 0) + + for shardId, pairList := range keyPairMap { + shardKeys := make([]string, len(pairList)) + for i, pair := range pairList { + b, _ := pair.Pk.ToByteArray() + shardKeys[i] = string(b) + } + keysMap[shardId] = shardKeys + } + + return keysMap +} + +// GenValidatorsFromPubKeys generates a map of validators per shard out of public keys map +func GenValidatorsFromPubKeys(pubKeysMap map[uint32][]string, nbShards uint32) map[uint32][]sharding.Validator { + validatorsMap := make(map[uint32][]sharding.Validator) + + for shardId, shardNodesPks := range pubKeysMap { + shardValidators := make([]sharding.Validator, 0) + shardCoordinator, _ := sharding.NewMultiShardCoordinator(nbShards, shardId) + for i := 0; i < len(shardNodesPks); i++ { + _, pk, _ := GenerateSkAndPkInShard(shardCoordinator, shardId) + address, err := pk.ToByteArray() + if err != nil { + return nil + } + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(shardNodesPks[i]), address) + shardValidators = append(shardValidators, v) + } + validatorsMap[shardId] = shardValidators + } + + return validatorsMap +} + +// CreateCryptoParams generates the crypto parameters (key pairs, key generator and suite) for multiple nodes +func CreateCryptoParams(nodesPerShard int, nbMetaNodes int, nbShards uint32) *CryptoParams { + suite := kyber.NewSuitePairingBn256() + singleSigner := &singlesig.SchnorrSigner{} + keyGen := signing.NewKeyGenerator(suite) + + keysMap := make(map[uint32][]*TestKeyPair) + keyPairs := make([]*TestKeyPair, nodesPerShard) + for shardId := uint32(0); shardId < nbShards; shardId++ { + for n := 0; n < nodesPerShard; n++ { + kp := &TestKeyPair{} + kp.Sk, kp.Pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[shardId] = keyPairs + } + + keyPairs = make([]*TestKeyPair, nbMetaNodes) + for n := 0; n < nbMetaNodes; n++ { + kp := &TestKeyPair{} + kp.Sk, kp.Pk = keyGen.GeneratePair() + keyPairs[n] = kp + } + keysMap[sharding.MetachainShardId] = keyPairs + + params := &CryptoParams{ + Keys: keysMap, + KeyGen: keyGen, + SingleSigner: singleSigner, + } + + return params +} + +// CloseProcessorNodes closes the used TestProcessorNodes and advertiser +func CloseProcessorNodes(nodes []*TestProcessorNode, advertiser p2p.Messenger) { + _ = advertiser.Close() + for _, n := range nodes { + _ = n.Messenger.Close() + } +} + +// StartP2pBootstrapOnProcessorNodes will start the p2p discovery on processor nodes and wait a predefined time +func StartP2pBootstrapOnProcessorNodes(nodes []*TestProcessorNode) { + for _, n := range nodes { + _ = n.Messenger.Bootstrap() + } + + fmt.Println("Delaying for nodes p2p bootstrap...") + time.Sleep(p2pBootstrapStepDelay) +} diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 1a0fe3fdb3a..b091f4bdb29 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -4,13 +4,16 @@ import ( "context" "encoding/hex" "fmt" + "strconv" "sync/atomic" "time" + "github.com/ElrondNetwork/elrond-go/config" 
"github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/partitioning" + "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data" dataBlock "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/state" @@ -31,14 +34,16 @@ import ( "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block" "github.com/ElrondNetwork/elrond-go/process/coordinator" + "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/ElrondNetwork/elrond-go/process/factory" metaProcess "github.com/ElrondNetwork/elrond-go/process/factory/metachain" "github.com/ElrondNetwork/elrond-go/process/factory/shard" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/sharding" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "github.com/ElrondNetwork/elrond-vm-common" "github.com/pkg/errors" ) @@ -57,13 +62,38 @@ var TestMultiSig = mock.NewMultiSigner(1) // TestUint64Converter represents an uint64 to byte slice converter var TestUint64Converter = uint64ByteSlice.NewBigEndianConverter() +// MinTxGasPrice minimum gas price required by a transaction +//TODO refactor all tests to pass with a non zero value +var MinTxGasPrice = uint64(0) + +// MinTxGasLimit minimum gas limit required by a transaction +var MinTxGasLimit = uint64(4) + +const maxTxNonceDeltaAllowed = 8000 + +// TestKeyPair holds a pair of private/public Keys +type TestKeyPair struct { + Sk crypto.PrivateKey + Pk crypto.PublicKey +} + +//CryptoParams holds crypto parametres +type CryptoParams struct { + KeyGen crypto.KeyGenerator + Keys map[uint32][]*TestKeyPair + SingleSigner crypto.SingleSigner +} + // TestProcessorNode represents a container type of class used in integration tests // with all its fields exported type TestProcessorNode struct { - ShardCoordinator sharding.Coordinator - Messenger p2p.Messenger + ShardCoordinator sharding.Coordinator + NodesCoordinator sharding.NodesCoordinator + SpecialAddressHandler process.SpecialAddressHandler + Messenger p2p.Messenger OwnAccount *TestWalletAccount + NodeKeys *TestKeyPair ShardDataPool dataRetriever.PoolsHolder MetaDataPool dataRetriever.MetaPoolsHolder @@ -72,6 +102,8 @@ type TestProcessorNode struct { BlockChain data.ChainHandler GenesisBlocks map[uint32]data.HeaderHandler + EconomicsData *economics.EconomicsData + InterceptorsContainer process.InterceptorsContainer ResolversContainer dataRetriever.ResolversContainer ResolverFinder dataRetriever.ResolversFinder @@ -86,15 +118,17 @@ type TestProcessorNode struct { BlockchainHook vmcommon.BlockchainHook ArgsParser process.ArgumentsParser ScProcessor process.SmartContractProcessor + RewardsProcessor process.RewardTransactionProcessor PreProcessorsContainer process.PreProcessorsContainer ForkDetector process.ForkDetector - BlockTracker process.BlocksTracker BlockProcessor process.BlockProcessor BroadcastMessenger consensus.BroadcastMessenger - Bootstrapper process.Bootstrapper + Bootstrapper TestBootstrapper Rounder *mock.RounderMock + MultiSigner crypto.MultiSigner + //Node is used to call the functionality already implemented in it Node *node.Node ScDataGetter external.ScDataGetter @@ -105,16 
+139,31 @@ type TestProcessorNode struct { CounterMetaRcv int32 } -// NewTestProcessorNode returns a new TestProcessorNode instance without sync capabilities -func NewTestProcessorNode(maxShards uint32, nodeShardId uint32, txSignPrivKeyShardId uint32, initialNodeAddr string) *TestProcessorNode { +// NewTestProcessorNode returns a new TestProcessorNode instance +func NewTestProcessorNode( + maxShards uint32, + nodeShardId uint32, + txSignPrivKeyShardId uint32, + initialNodeAddr string, +) *TestProcessorNode { + shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) + nodesCoordinator := &mock.NodesCoordinatorMock{} + kg := &mock.KeyGenMock{} + sk, pk := kg.GeneratePair() messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) tpn := &TestProcessorNode{ ShardCoordinator: shardCoordinator, Messenger: messenger, + NodesCoordinator: nodesCoordinator, } + tpn.NodeKeys = &TestKeyPair{ + Sk: sk, + Pk: pk, + } + tpn.MultiSigner = TestMultiSig tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) tpn.initDataPools() tpn.initTestNode() @@ -127,11 +176,21 @@ func NewTestProcessorNodeWithCustomDataPool(maxShards uint32, nodeShardId uint32 shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) + nodesCoordinator := &mock.NodesCoordinatorMock{} + kg := &mock.KeyGenMock{} + sk, pk := kg.GeneratePair() + tpn := &TestProcessorNode{ ShardCoordinator: shardCoordinator, Messenger: messenger, + NodesCoordinator: nodesCoordinator, } + tpn.NodeKeys = &TestKeyPair{ + Sk: sk, + Pk: pk, + } + tpn.MultiSigner = TestMultiSig tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) if tpn.ShardCoordinator.SelfId() != sharding.MetachainShardId { tpn.ShardDataPool = dPool @@ -144,11 +203,16 @@ func NewTestProcessorNodeWithCustomDataPool(maxShards uint32, nodeShardId uint32 } func (tpn *TestProcessorNode) initTestNode() { - tpn.initRounder() + tpn.SpecialAddressHandler = mock.NewSpecialAddressHandlerMock( + TestAddressConverter, + tpn.ShardCoordinator, + tpn.NodesCoordinator, + ) tpn.initStorage() - tpn.AccntState, _, _ = CreateAccountsDB(tpn.ShardCoordinator) + tpn.AccntState, _, _ = CreateAccountsDB(0) tpn.initChainHandler() tpn.GenesisBlocks = CreateGenesisBlocks(tpn.ShardCoordinator) + tpn.initEconomicsData() tpn.initInterceptors() tpn.initResolvers() tpn.initInnerProcessors() @@ -174,10 +238,6 @@ func (tpn *TestProcessorNode) initDataPools() { } } -func (tpn *TestProcessorNode) initRounder() { - tpn.Rounder = &mock.RounderMock{} -} - func (tpn *TestProcessorNode) initStorage() { if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { tpn.Storage = CreateMetaStore(tpn.ShardCoordinator) @@ -194,18 +254,44 @@ func (tpn *TestProcessorNode) initChainHandler() { } } +func (tpn *TestProcessorNode) initEconomicsData() { + mingGasPrice := strconv.FormatUint(MinTxGasPrice, 10) + minGasLimit := strconv.FormatUint(MinTxGasLimit, 10) + + economicsData, _ := economics.NewEconomicsData( + &config.ConfigEconomics{ + EconomicsAddresses: config.EconomicsAddresses{ + CommunityAddress: "addr1", + BurnAddress: "addr2", + }, + RewardsSettings: config.RewardsSettings{ + RewardsValue: "1000", + CommunityPercentage: 0.10, + LeaderPercentage: 0.50, + BurnPercentage: 0.40, + }, + FeeSettings: config.FeeSettings{ + MinGasPrice: mingGasPrice, + MinGasLimit: minGasLimit, + }, + }, + ) + + tpn.EconomicsData = economicsData +} + func (tpn 
*TestProcessorNode) initInterceptors() { var err error if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { interceptorContainerFactory, _ := metaProcess.NewInterceptorsContainerFactory( tpn.ShardCoordinator, + tpn.NodesCoordinator, tpn.Messenger, tpn.Storage, TestMarshalizer, TestHasher, TestMultiSig, tpn.MetaDataPool, - &mock.ChronologyValidatorMock{}, ) tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() @@ -216,6 +302,7 @@ func (tpn *TestProcessorNode) initInterceptors() { interceptorContainerFactory, _ := shard.NewInterceptorsContainerFactory( tpn.AccntState, tpn.ShardCoordinator, + tpn.NodesCoordinator, tpn.Messenger, tpn.Storage, TestMarshalizer, @@ -225,7 +312,8 @@ func (tpn *TestProcessorNode) initInterceptors() { TestMultiSig, tpn.ShardDataPool, TestAddressConverter, - &mock.ChronologyValidatorMock{}, + maxTxNonceDeltaAllowed, + tpn.EconomicsData, ) tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() @@ -252,8 +340,8 @@ func (tpn *TestProcessorNode) initResolvers() { tpn.ResolverFinder, _ = containers.NewResolversFinder(tpn.ResolversContainer, tpn.ShardCoordinator) tpn.RequestHandler, _ = requestHandlers.NewMetaResolverRequestHandler( tpn.ResolverFinder, - factory.HeadersTopic, factory.ShardHeadersForMetachainTopic, + factory.MetachainBlocksTopic, ) } else { resolversContainerFactory, _ := factoryDataRetriever.NewResolversContainerFactory( @@ -272,6 +360,7 @@ func (tpn *TestProcessorNode) initResolvers() { tpn.ResolverFinder, factory.TransactionTopic, factory.UnsignedTransactionTopic, + factory.RewardsTransactionTopic, factory.MiniBlocksTopic, factory.HeadersTopic, factory.MetachainBlocksTopic, @@ -290,10 +379,24 @@ func (tpn *TestProcessorNode) initInnerProcessors() { TestMarshalizer, TestHasher, TestAddressConverter, + tpn.SpecialAddressHandler, tpn.Storage, + tpn.ShardDataPool, + tpn.EconomicsData, ) + tpn.InterimProcContainer, _ = interimProcFactory.Create() tpn.ScrForwarder, _ = tpn.InterimProcContainer.Get(dataBlock.SmartContractResultBlock) + rewardsInter, _ := tpn.InterimProcContainer.Get(dataBlock.RewardsBlock) + rewardsHandler, _ := rewardsInter.(process.TransactionFeeHandler) + internalTxProducer, _ := rewardsInter.(process.InternalTransactionProducer) + + tpn.RewardsProcessor, _ = rewardTransaction.NewRewardTxProcessor( + tpn.AccntState, + TestAddressConverter, + tpn.ShardCoordinator, + rewardsInter, + ) tpn.VmProcessor, tpn.BlockchainHook = CreateIeleVMAndBlockchainHook(tpn.AccntState) tpn.VmDataGetter, _ = CreateIeleVMAndBlockchainHook(tpn.AccntState) @@ -314,8 +417,11 @@ func (tpn *TestProcessorNode) initInnerProcessors() { TestAddressConverter, tpn.ShardCoordinator, tpn.ScrForwarder, + rewardsHandler, ) + txTypeHandler, _ := coordinator.NewTxTypeHandler(TestAddressConverter, tpn.ShardCoordinator, tpn.AccntState) + tpn.TxProcessor, _ = transaction.NewTxProcessor( tpn.AccntState, TestHasher, @@ -323,6 +429,19 @@ func (tpn *TestProcessorNode) initInnerProcessors() { TestMarshalizer, tpn.ShardCoordinator, tpn.ScProcessor, + rewardsHandler, + txTypeHandler, + &mock.FeeHandlerStub{ + MinGasPriceCalled: func() uint64 { + return 0 + }, + MinGasLimitCalled: func() uint64 { + return 5 + }, + MinTxFeeCalled: func() uint64 { + return 0 + }, + }, ) fact, _ := shard.NewPreProcessorsContainerFactory( @@ -337,6 +456,19 @@ func (tpn *TestProcessorNode) initInnerProcessors() { tpn.TxProcessor, tpn.ScProcessor, tpn.ScProcessor.(process.SmartContractResultProcessor), + tpn.RewardsProcessor, + internalTxProducer, + &mock.FeeHandlerStub{ + 
MinGasPriceCalled: func() uint64 { + return 0 + }, + MinGasLimitCalled: func() uint64 { + return 5 + }, + MinTxFeeCalled: func() uint64 { + return 0 + }, + }, ) tpn.PreProcessorsContainer, _ = fact.Create() @@ -365,48 +497,35 @@ func (tpn *TestProcessorNode) initBlockProcessor() { }, } - tpn.BlockTracker = &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, + argumentsBase := block.ArgBaseProcessor{ + Accounts: tpn.AccntState, + ForkDetector: tpn.ForkDetector, + Hasher: TestHasher, + Marshalizer: TestMarshalizer, + Store: tpn.Storage, + ShardCoordinator: tpn.ShardCoordinator, + NodesCoordinator: tpn.NodesCoordinator, + SpecialAddressHandler: tpn.SpecialAddressHandler, + Uint64Converter: TestUint64Converter, + StartHeaders: tpn.GenesisBlocks, + RequestHandler: tpn.RequestHandler, + Core: nil, } if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { - tpn.BlockProcessor, err = block.NewMetaProcessor( - &mock.ServiceContainerMock{}, - tpn.AccntState, - tpn.MetaDataPool, - tpn.ForkDetector, - tpn.ShardCoordinator, - TestHasher, - TestMarshalizer, - tpn.Storage, - tpn.GenesisBlocks, - tpn.RequestHandler, - TestUint64Converter, - ) + argumentsBase.Core = &mock.ServiceContainerMock{} + arguments := block.ArgMetaProcessor{ + ArgBaseProcessor: argumentsBase, + DataPool: tpn.MetaDataPool, + } + + tpn.BlockProcessor, err = block.NewMetaProcessor(arguments) } else { arguments := block.ArgShardProcessor{ - ArgBaseProcessor: &block.ArgBaseProcessor{ - Accounts: tpn.AccntState, - ForkDetector: tpn.ForkDetector, - Hasher: TestHasher, - Marshalizer: TestMarshalizer, - Store: tpn.Storage, - ShardCoordinator: tpn.ShardCoordinator, - Uint64Converter: TestUint64Converter, - StartHeaders: tpn.GenesisBlocks, - RequestHandler: tpn.RequestHandler, - Core: nil, - }, - DataPool: tpn.ShardDataPool, - BlocksTracker: tpn.BlockTracker, - TxCoordinator: tpn.TxCoordinator, + ArgBaseProcessor: argumentsBase, + DataPool: tpn.ShardDataPool, + TxCoordinator: tpn.TxCoordinator, + TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, } tpn.BlockProcessor, err = block.NewShardProcessor(arguments) @@ -436,12 +555,15 @@ func (tpn *TestProcessorNode) initNode() { node.WithAccountsAdapter(tpn.AccntState), node.WithKeyGen(tpn.OwnAccount.KeygenTxSign), node.WithShardCoordinator(tpn.ShardCoordinator), + node.WithNodesCoordinator(tpn.NodesCoordinator), node.WithBlockChain(tpn.BlockChain), node.WithUint64ByteSliceConverter(TestUint64Converter), - node.WithMultiSigner(TestMultiSig), + node.WithMultiSigner(tpn.MultiSigner), node.WithSingleSigner(tpn.OwnAccount.SingleSigner), node.WithTxSignPrivKey(tpn.OwnAccount.SkTxSign), node.WithTxSignPubKey(tpn.OwnAccount.PkTxSign), + node.WithPrivKey(tpn.NodeKeys.Sk), + node.WithPubKey(tpn.NodeKeys.Pk), node.WithInterceptorsContainer(tpn.InterceptorsContainer), node.WithResolversFinder(tpn.ResolverFinder), node.WithBlockProcessor(tpn.BlockProcessor), @@ -504,11 +626,11 @@ func (tpn *TestProcessorNode) addHandlersForCounters() { tpn.ShardDataPool.UnsignedTransactions().RegisterHandler(txHandler) tpn.ShardDataPool.Transactions().RegisterHandler(txHandler) + tpn.ShardDataPool.RewardTransactions().RegisterHandler(txHandler) tpn.ShardDataPool.Headers().RegisterHandler(hdrHandlers) tpn.ShardDataPool.MetaBlocks().RegisterHandler(metaHandlers) 
tpn.ShardDataPool.MiniBlocks().RegisterHandler(mbHandlers) } - } // StartSync calls Bootstrapper.StartSync. Errors if bootstrapper is not set @@ -776,3 +898,7 @@ func (tpn *TestProcessorNode) MiniBlocksPresent(hashes [][]byte) bool { return true } + +func (tpn *TestProcessorNode) initRounder() { + tpn.Rounder = &mock.RounderMock{} +} diff --git a/integrationTests/testProcessorNodeWithMultisigner.go b/integrationTests/testProcessorNodeWithMultisigner.go new file mode 100644 index 00000000000..c25c6c1864f --- /dev/null +++ b/integrationTests/testProcessorNodeWithMultisigner.go @@ -0,0 +1,292 @@ +package integrationTests + +import ( + "bytes" + "context" + "fmt" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/cmd/node/factory" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/crypto" + kmultisig "github.com/ElrondNetwork/elrond-go/crypto/signing/kyber/multisig" + "github.com/ElrondNetwork/elrond-go/crypto/signing/multisig" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/hashing/blake2b" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" +) + +// NewTestProcessorNodeWithCustomNodesCoordinator returns a new TestProcessorNode instance with custom NodesCoordinator +func NewTestProcessorNodeWithCustomNodesCoordinator( + maxShards uint32, + nodeShardId uint32, + initialNodeAddr string, + nodesCoordinator sharding.NodesCoordinator, + cp *CryptoParams, + keyIndex int, +) *TestProcessorNode { + + shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) + + messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) + tpn := &TestProcessorNode{ + ShardCoordinator: shardCoordinator, + Messenger: messenger, + NodesCoordinator: nodesCoordinator, + } + tpn.NodeKeys = cp.Keys[nodeShardId][keyIndex] + + llsig := &kmultisig.KyberMultiSignerBLS{} + blsHasher := blake2b.Blake2b{HashSize: factory.BlsHashSize} + + pubKeysMap := PubKeysMapFromKeysMap(cp.Keys) + + tpn.MultiSigner, _ = multisig.NewBLSMultisig( + llsig, + blsHasher, + pubKeysMap[nodeShardId], + tpn.NodeKeys.Sk, + cp.KeyGen, + 0, + ) + if tpn.MultiSigner == nil { + fmt.Println("Error generating multisigner") + } + accountShardId := nodeShardId + if nodeShardId == sharding.MetachainShardId { + accountShardId = 0 + } + + tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, accountShardId) + tpn.initDataPools() + tpn.initTestNode() + + return tpn +} + +// CreateNodesWithNodesCoordinator returns a map with nodes per shard each using a real nodes coordinator +func CreateNodesWithNodesCoordinator( + nodesPerShard int, + nbMetaNodes int, + nbShards int, + shardConsensusGroupSize int, + metaConsensusGroupSize int, + seedAddress string, +) map[uint32][]*TestProcessorNode { + cp := CreateCryptoParams(nodesPerShard, nbMetaNodes, uint32(nbShards)) + pubKeys := PubKeysMapFromKeysMap(cp.Keys) + validatorsMap := GenValidatorsFromPubKeys(pubKeys, uint32(nbShards)) + nodesMap := make(map[uint32][]*TestProcessorNode) + for shardId, validatorList := range validatorsMap { + nodesCoordinator, err := sharding.NewIndexHashedNodesCoordinator( + shardConsensusGroupSize, + metaConsensusGroupSize, + TestHasher, + shardId, + uint32(nbShards), + validatorsMap, + ) + + if err != nil { + fmt.Println("Error creating node coordinator") + } + + nodesList := make([]*TestProcessorNode, len(validatorList)) + for i := range validatorList { + nodesList[i] = NewTestProcessorNodeWithCustomNodesCoordinator( + uint32(nbShards), + 
shardId, + seedAddress, + nodesCoordinator, + cp, + i, + ) + } + nodesMap[shardId] = nodesList + } + + return nodesMap +} + +// ProposeBlockWithConsensusSignature proposes +func ProposeBlockWithConsensusSignature( + shardId uint32, + nodesMap map[uint32][]*TestProcessorNode, + round uint64, + nonce uint64, + randomness []byte, +) (data.BodyHandler, data.HeaderHandler, [][]byte, []*TestProcessorNode) { + + nodesCoordinator := nodesMap[shardId][0].NodesCoordinator + pubKeys, err := nodesCoordinator.GetValidatorsPublicKeys(randomness, round, shardId) + if err != nil { + fmt.Println("Error getting the validators public keys: ", err) + } + + // set the consensus reward addresses + for _, node := range nodesMap[shardId] { + node.BlockProcessor.SetConsensusData(randomness, round, 0, shardId) + } + + consensusNodes := selectTestNodesForPubKeys(nodesMap[shardId], pubKeys) + // first node is block proposer + body, header, txHashes := consensusNodes[0].ProposeBlock(round, nonce) + header.SetPrevRandSeed(randomness) + header = DoConsensusSigningOnBlock(header, consensusNodes, pubKeys) + + return body, header, txHashes, consensusNodes +} + +func selectTestNodesForPubKeys(nodes []*TestProcessorNode, pubKeys []string) []*TestProcessorNode { + selectedNodes := make([]*TestProcessorNode, len(pubKeys)) + cntNodes := 0 + + for i, pk := range pubKeys { + for _, node := range nodes { + pubKeyBytes, _ := node.NodeKeys.Pk.ToByteArray() + if bytes.Equal(pubKeyBytes, []byte(pk)) { + selectedNodes[i] = node + cntNodes++ + } + } + } + + if cntNodes != len(pubKeys) { + fmt.Println("Error selecting nodes from public keys") + } + + return selectedNodes +} + +// DoConsensusSigningOnBlock simulates a consensus aggregated signature on the provided block +func DoConsensusSigningOnBlock( + blockHeader data.HeaderHandler, + consensusNodes []*TestProcessorNode, + pubKeys []string, +) data.HeaderHandler { + // set bitmap for all consensus nodes signing + bitmap := make([]byte, len(consensusNodes)/8+1) + for i := range bitmap { + bitmap[i] = 0xFF + } + + bitmap[len(consensusNodes)/8] >>= uint8(8 - (len(consensusNodes) % 8)) + blockHeader.SetPubKeysBitmap(bitmap) + // clear signature, as we need to compute it below + blockHeader.SetSignature(nil) + blockHeader.SetPubKeysBitmap(nil) + blockHeaderHash, _ := core.CalculateHash(TestMarshalizer, TestHasher, blockHeader) + + var msig crypto.MultiSigner + msigProposer, _ := consensusNodes[0].MultiSigner.Create(pubKeys, 0) + _, _ = msigProposer.CreateSignatureShare(blockHeaderHash, bitmap) + + for i := 1; i < len(consensusNodes); i++ { + msig, _ = consensusNodes[i].MultiSigner.Create(pubKeys, uint16(i)) + sigShare, _ := msig.CreateSignatureShare(blockHeaderHash, bitmap) + _ = msigProposer.StoreSignatureShare(uint16(i), sigShare) + } + + sig, _ := msigProposer.AggregateSigs(bitmap) + blockHeader.SetSignature(sig) + blockHeader.SetPubKeysBitmap(bitmap) + + return blockHeader +} + +// AllShardsProposeBlock simulates each shard selecting a consensus group and proposing/broadcasting/committing a block +func AllShardsProposeBlock( + round uint64, + nonce uint64, + prevRandomness map[uint32][]byte, + nodesMap map[uint32][]*TestProcessorNode, +) ( + map[uint32]data.BodyHandler, + map[uint32]data.HeaderHandler, + map[uint32][]*TestProcessorNode, + map[uint32][]byte, +) { + + body := make(map[uint32]data.BodyHandler) + header := make(map[uint32]data.HeaderHandler) + consensusNodes := make(map[uint32][]*TestProcessorNode) + newRandomness := make(map[uint32][]byte) + + // propose blocks + for i := 
range nodesMap { + body[i], header[i], _, consensusNodes[i] = ProposeBlockWithConsensusSignature(i, nodesMap, round, nonce, prevRandomness[i]) + newRandomness[i] = header[i].GetRandSeed() + } + + // propagate blocks + for i := range nodesMap { + consensusNodes[i][0].BroadcastBlock(body[i], header[i]) + consensusNodes[i][0].CommitBlock(body[i], header[i]) + } + + time.Sleep(2 * time.Second) + + return body, header, consensusNodes, newRandomness +} + +// SyncAllShardsWithRoundBlock enforces all nodes in each shard synchronizing the block for the given round +func SyncAllShardsWithRoundBlock( + t *testing.T, + nodesMap map[uint32][]*TestProcessorNode, + indexProposers map[uint32]int, + round uint64, +) { + for shard, nodeList := range nodesMap { + SyncBlock(t, nodeList, []int{indexProposers[shard]}, round) + } + time.Sleep(2 * time.Second) +} + +// VerifyNodesHaveHeaders verifies that each node has the corresponding header +func VerifyNodesHaveHeaders( + t *testing.T, + headers map[uint32]data.HeaderHandler, + nodesMap map[uint32][]*TestProcessorNode, +) { + var v interface{} + var ok bool + + // all nodes in metachain have the block headers in pool as interceptor validates them + for shHeader, header := range headers { + headerHash, _ := core.CalculateHash(TestMarshalizer, TestHasher, header) + + for _, metaNode := range nodesMap[sharding.MetachainShardId] { + if shHeader == sharding.MetachainShardId { + v, ok = metaNode.MetaDataPool.MetaChainBlocks().Get(headerHash) + } else { + v, ok = metaNode.MetaDataPool.ShardHeaders().Get(headerHash) + } + + assert.True(t, ok) + assert.Equal(t, header, v) + } + + // all nodes in shards need to have their own shard headers and metachain headers + for sh, nodesList := range nodesMap { + if sh == sharding.MetachainShardId { + continue + } + + if sh != shHeader && shHeader != sharding.MetachainShardId { + continue + } + + for _, node := range nodesList { + if shHeader == sharding.MetachainShardId { + v, ok = node.ShardDataPool.MetaBlocks().Get(headerHash) + } else { + v, ok = node.ShardDataPool.Headers().Get(headerHash) + } + assert.True(t, ok) + assert.Equal(t, header, v) + } + } + } +} diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 5d9ff723a2e..2182fe07969 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -5,9 +5,7 @@ import ( "fmt" "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" - "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" - "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block" "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/sync" @@ -23,13 +21,24 @@ func NewTestSyncNode( ) *TestProcessorNode { shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) + nodesCoordinator := &mock.NodesCoordinatorMock{} messenger := CreateMessengerWithKadDht(context.Background(), initialNodeAddr) + tpn := &TestProcessorNode{ ShardCoordinator: shardCoordinator, Messenger: messenger, + NodesCoordinator: nodesCoordinator, + } + + kg := &mock.KeyGenMock{} + sk, pk := kg.GeneratePair() + tpn.NodeKeys = &TestKeyPair{ + Sk: sk, + Pk: pk, } + tpn.MultiSigner = TestMultiSig tpn.OwnAccount = CreateTestWalletAccount(shardCoordinator, txSignPrivKeyShardId) tpn.initDataPools() tpn.initTestNodeWithSync() @@ -40,9 +49,15 @@ func NewTestSyncNode( func (tpn *TestProcessorNode) initTestNodeWithSync() { tpn.initRounder() 
tpn.initStorage() - tpn.AccntState, _, _ = CreateAccountsDB(tpn.ShardCoordinator) + tpn.AccntState, _, _ = CreateAccountsDB(0) tpn.initChainHandler() tpn.GenesisBlocks = CreateGenesisBlocks(tpn.ShardCoordinator) + tpn.SpecialAddressHandler = mock.NewSpecialAddressHandlerMock( + TestAddressConverter, + tpn.ShardCoordinator, + tpn.NodesCoordinator, + ) + tpn.initEconomicsData() tpn.initInterceptors() tpn.initResolvers() tpn.initInnerProcessors() @@ -64,50 +79,40 @@ func (tpn *TestProcessorNode) initTestNodeWithSync() { func (tpn *TestProcessorNode) initBlockProcessorWithSync() { var err error - tpn.BlockTracker = &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, + argumentsBase := block.ArgBaseProcessor{ + Accounts: tpn.AccntState, + ForkDetector: nil, + Hasher: TestHasher, + Marshalizer: TestMarshalizer, + Store: tpn.Storage, + ShardCoordinator: tpn.ShardCoordinator, + NodesCoordinator: tpn.NodesCoordinator, + SpecialAddressHandler: tpn.SpecialAddressHandler, + Uint64Converter: TestUint64Converter, + StartHeaders: tpn.GenesisBlocks, + RequestHandler: tpn.RequestHandler, + Core: nil, } if tpn.ShardCoordinator.SelfId() == sharding.MetachainShardId { tpn.ForkDetector, _ = sync.NewMetaForkDetector(tpn.Rounder) - tpn.BlockProcessor, err = block.NewMetaProcessor( - &mock.ServiceContainerMock{}, - tpn.AccntState, - tpn.MetaDataPool, - tpn.ForkDetector, - tpn.ShardCoordinator, - TestHasher, - TestMarshalizer, - tpn.Storage, - tpn.GenesisBlocks, - tpn.RequestHandler, - TestUint64Converter, - ) + argumentsBase.Core = &mock.ServiceContainerMock{} + argumentsBase.ForkDetector = tpn.ForkDetector + arguments := block.ArgMetaProcessor{ + ArgBaseProcessor: argumentsBase, + DataPool: tpn.MetaDataPool, + } + + tpn.BlockProcessor, err = block.NewMetaProcessor(arguments) + } else { tpn.ForkDetector, _ = sync.NewShardForkDetector(tpn.Rounder) + argumentsBase.ForkDetector = tpn.ForkDetector arguments := block.ArgShardProcessor{ - ArgBaseProcessor: &block.ArgBaseProcessor{ - Accounts: tpn.AccntState, - ForkDetector: tpn.ForkDetector, - Hasher: TestHasher, - Marshalizer: TestMarshalizer, - Store: tpn.Storage, - ShardCoordinator: tpn.ShardCoordinator, - Uint64Converter: TestUint64Converter, - StartHeaders: tpn.GenesisBlocks, - RequestHandler: tpn.RequestHandler, - Core: nil, - }, - DataPool: tpn.ShardDataPool, - BlocksTracker: tpn.BlockTracker, - TxCoordinator: tpn.TxCoordinator, + ArgBaseProcessor: argumentsBase, + DataPool: tpn.ShardDataPool, + TxCoordinator: tpn.TxCoordinator, + TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, } tpn.BlockProcessor, err = block.NewShardProcessor(arguments) @@ -118,7 +123,7 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { } } -func (tpn *TestProcessorNode) createShardBootstrapper() (process.Bootstrapper, error) { +func (tpn *TestProcessorNode) createShardBootstrapper() (TestBootstrapper, error) { bootstrap, err := sync.NewShardBootstrap( tpn.ShardDataPool, tpn.Storage, @@ -138,10 +143,12 @@ func (tpn *TestProcessorNode) createShardBootstrapper() (process.Bootstrapper, e return nil, err } - return bootstrap, nil + return &sync.TestShardBootstrap{ + ShardBootstrap: bootstrap, + }, nil } -func (tpn *TestProcessorNode) createMetaChainBootstrapper() (process.Bootstrapper, error) { +func (tpn *TestProcessorNode) createMetaChainBootstrapper() 
(TestBootstrapper, error) { bootstrap, err := sync.NewMetaBootstrap( tpn.MetaDataPool, tpn.Storage, @@ -162,7 +169,9 @@ func (tpn *TestProcessorNode) createMetaChainBootstrapper() (process.Bootstrappe return nil, err } - return bootstrap, nil + return &sync.TestMetaBootstrap{ + MetaBootstrap: bootstrap, + }, nil } func (tpn *TestProcessorNode) initBootstrapper() { diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index a6f2118c95b..4d11c52c111 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -13,6 +13,7 @@ import ( "github.com/ElrondNetwork/elrond-go/integrationTests/mock" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/coordinator" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/smartContract" "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" @@ -100,8 +101,25 @@ func CreateTxProcessorWithOneSCExecutorMockVM(accnts state.AccountsAdapter, opGa addrConv, oneShardCoordinator, &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) + + txTypeHandler, _ := coordinator.NewTxTypeHandler( + addrConv, + oneShardCoordinator, + accnts) + + txProcessor, _ := transaction.NewTxProcessor( + accnts, + testHasher, + addrConv, + testMarshalizer, + oneShardCoordinator, + scProcessor, + &mock.UnsignedTxHandlerMock{}, + txTypeHandler, + &mock.FeeHandlerStub{}, ) - txProcessor, _ := transaction.NewTxProcessor(accnts, testHasher, addrConv, testMarshalizer, oneShardCoordinator, scProcessor) return txProcessor } @@ -144,8 +162,25 @@ func CreateTxProcessorWithOneSCExecutorIeleVM( addrConv, oneShardCoordinator, &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) + + txTypeHandler, _ := coordinator.NewTxTypeHandler( + addrConv, + oneShardCoordinator, + accnts) + + txProcessor, _ := transaction.NewTxProcessor( + accnts, + testHasher, + addrConv, + testMarshalizer, + oneShardCoordinator, + scProcessor, + &mock.UnsignedTxHandlerMock{}, + txTypeHandler, + &mock.FeeHandlerStub{}, ) - txProcessor, _ := transaction.NewTxProcessor(accnts, testHasher, addrConv, testMarshalizer, oneShardCoordinator, scProcessor) return txProcessor, blockChainHook } diff --git a/node/defineOptions.go b/node/defineOptions.go index e812025e30b..e266323d178 100644 --- a/node/defineOptions.go +++ b/node/defineOptions.go @@ -6,6 +6,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/state" @@ -214,17 +215,6 @@ func WithBlockProcessor(blockProcessor process.BlockProcessor) Option { } } -// WithBlockTracker sets up the block tracker option for the Node -func WithBlockTracker(blockTracker process.BlocksTracker) Option { - return func(n *Node) error { - if blockTracker == nil || blockTracker.IsInterfaceNil() { - return ErrNilBlockTracker - } - n.blockTracker = blockTracker - return nil - } -} - // WithGenesisTime sets up the genesis time option for the Node func WithGenesisTime(genesisTime time.Time) Option { return func(n *Node) error { @@ -266,6 +256,17 @@ func WithShardCoordinator(shardCoordinator sharding.Coordinator) Option { } } +// WithNodesCoordinator sets up the nodes coordinator +func WithNodesCoordinator(nodesCoordinator 
sharding.NodesCoordinator) Option { + return func(n *Node) error { + if nodesCoordinator == nil { + return ErrNilNodesCoordinator + } + n.nodesCoordinator = nodesCoordinator + return nil + } +} + // WithUint64ByteSliceConverter sets up the uint64 <-> []byte converter func WithUint64ByteSliceConverter(converter typeConverters.Uint64ByteSliceConverter) Option { return func(n *Node) error { @@ -389,3 +390,11 @@ func WithAppStatusHandler(aph core.AppStatusHandler) Option { return nil } } + +// WithIndexer sets up a indexer for the Node +func WithIndexer(indexer indexer.Indexer) Option { + return func(n *Node) error { + n.indexer = indexer + return nil + } +} diff --git a/node/defineOptions_test.go b/node/defineOptions_test.go index 50f248f61dd..b8c89e35624 100644 --- a/node/defineOptions_test.go +++ b/node/defineOptions_test.go @@ -544,6 +544,32 @@ func TestWithShardCoordinator_ShouldWork(t *testing.T) { assert.Nil(t, err) } +func TestWithNodesCoordinator_NilNodesCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + node, _ := NewNode() + + opt := WithNodesCoordinator(nil) + err := opt(node) + + assert.Nil(t, node.nodesCoordinator) + assert.Equal(t, ErrNilNodesCoordinator, err) +} + +func TestWithNodesCoordinator_ShouldWork(t *testing.T) { + t.Parallel() + + node, _ := NewNode() + + nodesCoordinator := &mock.NodesCoordinatorMock{} + + opt := WithNodesCoordinator(nodesCoordinator) + err := opt(node) + + assert.True(t, node.nodesCoordinator == nodesCoordinator) + assert.Nil(t, err) +} + func TestWithUint64ByteSliceConverter_NilConverterShouldErr(t *testing.T) { t.Parallel() @@ -763,3 +789,16 @@ func TestWithAppStatusHandler_OkAshShouldPass(t *testing.T) { assert.IsType(t, &statusHandler.NilStatusHandler{}, node.appStatusHandler) assert.Nil(t, err) } + +func TestWithIndexer_ShouldWork(t *testing.T) { + t.Parallel() + + node, _ := NewNode() + + indexer := &mock.IndexerMock{} + opt := WithIndexer(indexer) + err := opt(node) + + assert.Equal(t, indexer, node.indexer) + assert.Nil(t, err) +} diff --git a/node/errors.go b/node/errors.go index 788d56d2c8c..b8dd7743455 100644 --- a/node/errors.go +++ b/node/errors.go @@ -49,15 +49,15 @@ var ErrNilRounder = errors.New("trying to set nil rounder") // ErrNilBlockProcessor signals that a nil block processor has been provided var ErrNilBlockProcessor = errors.New("trying to set nil block processor") -// ErrNilBlockTracker signals that a nil block tracker has been provided -var ErrNilBlockTracker = errors.New("trying to set nil block tracker") - // ErrNilDataPool signals that a nil data pool has been provided var ErrNilDataPool = errors.New("trying to set nil data pool") // ErrNilShardCoordinator signals that a nil shard coordinator has been provided var ErrNilShardCoordinator = errors.New("trying to set nil shard coordinator") +// ErrNilNodesCoordinator signals that a nil nodes coordinator has been provided +var ErrNilNodesCoordinator = errors.New("trying to set nil nodes coordinator") + // ErrNilUint64ByteSliceConverter signals that a nil uint64 <-> byte slice converter has been provided var ErrNilUint64ByteSliceConverter = errors.New("trying to set nil uint64 - byte slice converter") diff --git a/node/heartbeat/errors.go b/node/heartbeat/errors.go index 4313c32ee16..37c929a94a3 100644 --- a/node/heartbeat/errors.go +++ b/node/heartbeat/errors.go @@ -34,3 +34,27 @@ var ErrNilAppStatusHandler = errors.New("nil AppStatusHandler") // ErrNilShardCoordinator signals that an operation has been attempted to or with a nil shard coordinator var 
ErrNilShardCoordinator = errors.New("nil shard coordinator") + +// ErrNilTimer signals that a nil time getter handler has been provided +var ErrNilTimer = errors.New("nil time getter handler") + +// ErrNilMonitorDb signals that a nil monitor db was provided +var ErrNilMonitorDb = errors.New("nil monitor db") + +// ErrNilMessageHandler signals that the provided message handler is nil +var ErrNilMessageHandler = errors.New("nil message handler") + +// ErrNilHeartbeatStorer signals that the provided heartbeat storer is nil +var ErrNilHeartbeatStorer = errors.New("nil heartbeat storer") + +// ErrFetchGenesisTimeFromDb signals that the genesis time cannot be fetched from db +var ErrFetchGenesisTimeFromDb = errors.New("monitor: can't get genesis time from db") + +// ErrStoreGenesisTimeToDb signals that the genesis time cannot be store to db +var ErrStoreGenesisTimeToDb = errors.New("monitor: can't store genesis time") + +// ErrUnmarshalGenesisTime signals that the unmarshaling of the genesis time didn't work +var ErrUnmarshalGenesisTime = errors.New("monitor: can't unmarshal genesis time") + +// ErrMarshalGenesisTime signals that the marshaling of the genesis time didn't work +var ErrMarshalGenesisTime = errors.New("monitor: can't marshal genesis time") diff --git a/node/heartbeat/export_test.go b/node/heartbeat/export_test.go new file mode 100644 index 00000000000..ba086141946 --- /dev/null +++ b/node/heartbeat/export_test.go @@ -0,0 +1,72 @@ +package heartbeat + +import "time" + +func (m *Monitor) GetMessages() map[string]*heartbeatMessageInfo { + return m.heartbeatMessages +} + +func (m *Monitor) SetMessages(messages map[string]*heartbeatMessageInfo) { + m.heartbeatMessages = messages +} + +func (m *Monitor) GetHbmi(tmstp time.Time) *heartbeatMessageInfo { + return &heartbeatMessageInfo{ + maxDurationPeerUnresponsive: 0, + maxInactiveTime: Duration{}, + totalUpTime: Duration{}, + totalDownTime: Duration{}, + getTimeHandler: nil, + timeStamp: time.Time{}, + isActive: false, + receivedShardID: 0, + computedShardID: 0, + versionNumber: "", + nodeDisplayName: "", + isValidator: false, + lastUptimeDowntime: time.Time{}, + genesisTime: time.Time{}, + } +} + +func (m *Monitor) SendHeartbeatMessage(hb *Heartbeat) { + m.addHeartbeatMessageToMap(hb) +} + +func (m *Monitor) AddHeartbeatMessageToMap(hb *Heartbeat) { + m.addHeartbeatMessageToMap(hb) +} + +func NewHeartbeatMessageInfo( + maxDurationPeerUnresponsive time.Duration, + isValidator bool, + genesisTime time.Time, + timer Timer, +) (*heartbeatMessageInfo, error) { + return newHeartbeatMessageInfo( + maxDurationPeerUnresponsive, + isValidator, + genesisTime, + timer, + ) +} + +func (hbmi *heartbeatMessageInfo) GetTimeStamp() time.Time { + return hbmi.timeStamp +} + +func (hbmi *heartbeatMessageInfo) GetReceiverShardId() uint32 { + return hbmi.receivedShardID +} + +func (hbmi *heartbeatMessageInfo) GetTotalUpTime() Duration { + return hbmi.totalUpTime +} + +func (hbmi *heartbeatMessageInfo) GetTotalDownTime() Duration { + return hbmi.totalDownTime +} + +func (hbmi *heartbeatMessageInfo) GetIsActive() bool { + return hbmi.isActive +} diff --git a/node/heartbeat/hearbeatMessageInfo.go b/node/heartbeat/hearbeatMessageInfo.go index 56944d040d5..8d80921d0cb 100644 --- a/node/heartbeat/hearbeatMessageInfo.go +++ b/node/heartbeat/hearbeatMessageInfo.go @@ -4,8 +4,6 @@ import ( "time" ) -var emptyTimestamp = time.Time{} - // heartbeatMessageInfo retain the message info received from another node (identified by a public key) type heartbeatMessageInfo struct 
{ maxDurationPeerUnresponsive time.Duration @@ -22,81 +20,116 @@ type heartbeatMessageInfo struct { nodeDisplayName string isValidator bool lastUptimeDowntime time.Time + genesisTime time.Time } // newHeartbeatMessageInfo returns a new instance of a heartbeatMessageInfo func newHeartbeatMessageInfo( maxDurationPeerUnresponsive time.Duration, isValidator bool, + genesisTime time.Time, + timer Timer, ) (*heartbeatMessageInfo, error) { if maxDurationPeerUnresponsive == 0 { return nil, ErrInvalidMaxDurationPeerUnresponsive } + if timer == nil || timer.IsInterfaceNil() { + return nil, ErrNilTimer + } hbmi := &heartbeatMessageInfo{ maxDurationPeerUnresponsive: maxDurationPeerUnresponsive, maxInactiveTime: Duration{0}, isActive: false, receivedShardID: uint32(0), - timeStamp: emptyTimestamp, - lastUptimeDowntime: time.Now(), + timeStamp: genesisTime, + lastUptimeDowntime: timer.Now(), totalUpTime: Duration{0}, totalDownTime: Duration{0}, versionNumber: "", nodeDisplayName: "", isValidator: isValidator, + genesisTime: genesisTime, + getTimeHandler: timer.Now, } - hbmi.getTimeHandler = hbmi.clockTime return hbmi, nil } -func (hbmi *heartbeatMessageInfo) clockTime() time.Time { - return time.Now() +func (hbmi *heartbeatMessageInfo) updateFields(crtTime time.Time) { + validDuration := computeValidDuration(crtTime, hbmi) + previousActive := hbmi.isActive && validDuration + hbmi.isActive = true + + hbmi.updateTimes(crtTime, previousActive) } -func (hbmi *heartbeatMessageInfo) updateFields() { - crtDuration := hbmi.getTimeHandler().Sub(hbmi.timeStamp) - crtDuration = maxDuration(0, crtDuration) +func (hbmi *heartbeatMessageInfo) computeActive(crtTime time.Time) { + validDuration := computeValidDuration(crtTime, hbmi) + hbmi.isActive = hbmi.isActive && validDuration - hbmi.isActive = crtDuration < hbmi.maxDurationPeerUnresponsive - hbmi.updateUpAndDownTime() - hbmi.updateMaxInactiveTimeDuration() + hbmi.updateTimes(crtTime, hbmi.isActive) +} + +func (hbmi *heartbeatMessageInfo) updateTimes(crtTime time.Time, previousActive bool) { + if crtTime.Sub(hbmi.genesisTime) < 0 { + return + } + hbmi.updateMaxInactiveTimeDuration(crtTime) + hbmi.updateUpAndDownTime(previousActive, crtTime) } -// Wil update the total time a node was up and down -func (hbmi *heartbeatMessageInfo) updateUpAndDownTime() { - lastDuration := hbmi.clockTime().Sub(hbmi.lastUptimeDowntime) +func computeValidDuration(crtTime time.Time, hbmi *heartbeatMessageInfo) bool { + crtDuration := crtTime.Sub(hbmi.timeStamp) + crtDuration = maxDuration(0, crtDuration) + validDuration := crtDuration <= hbmi.maxDurationPeerUnresponsive + return validDuration +} + +// Will update the total time a node was up and down +func (hbmi *heartbeatMessageInfo) updateUpAndDownTime(previousActive bool, crtTime time.Time) { + if hbmi.lastUptimeDowntime.Sub(hbmi.genesisTime) < 0 { + hbmi.lastUptimeDowntime = hbmi.genesisTime + } + + lastDuration := crtTime.Sub(hbmi.lastUptimeDowntime) lastDuration = maxDuration(0, lastDuration) - if hbmi.isActive { + if previousActive && hbmi.isActive { hbmi.totalUpTime.Duration += lastDuration } else { hbmi.totalDownTime.Duration += lastDuration } - hbmi.lastUptimeDowntime = time.Now() + hbmi.lastUptimeDowntime = crtTime } // HeartbeatReceived processes a new message arrived from a peer -func (hbmi *heartbeatMessageInfo) HeartbeatReceived(computedShardID, receivedshardID uint32, version string, - nodeDisplayName string) { +func (hbmi *heartbeatMessageInfo) HeartbeatReceived( + computedShardID uint32, + receivedshardID uint32, + 
version string, + nodeDisplayName string, +) { crtTime := hbmi.getTimeHandler() - hbmi.updateFields() + hbmi.updateFields(crtTime) hbmi.computedShardID = computedShardID hbmi.receivedShardID = receivedshardID - hbmi.updateMaxInactiveTimeDuration() + hbmi.updateMaxInactiveTimeDuration(crtTime) hbmi.timeStamp = crtTime hbmi.versionNumber = version hbmi.nodeDisplayName = nodeDisplayName } -func (hbmi *heartbeatMessageInfo) updateMaxInactiveTimeDuration() { - crtDuration := hbmi.getTimeHandler().Sub(hbmi.timeStamp) +func (hbmi *heartbeatMessageInfo) updateMaxInactiveTimeDuration(currentTime time.Time) { + crtDuration := currentTime.Sub(hbmi.timeStamp) crtDuration = maxDuration(0, crtDuration) - if hbmi.maxInactiveTime.Duration < crtDuration && hbmi.timeStamp != emptyTimestamp { + greaterDurationThanMax := hbmi.maxInactiveTime.Duration < crtDuration + currentTimeAfterGenesis := hbmi.genesisTime.Sub(currentTime) < 0 + + if greaterDurationThanMax && currentTimeAfterGenesis { hbmi.maxInactiveTime.Duration = crtDuration } } diff --git a/node/heartbeat/hearbeatMessageInfo_test.go b/node/heartbeat/hearbeatMessageInfo_test.go index d10d8715b1b..d58eb2c0e48 100644 --- a/node/heartbeat/hearbeatMessageInfo_test.go +++ b/node/heartbeat/hearbeatMessageInfo_test.go @@ -1,90 +1,217 @@ -package heartbeat +package heartbeat_test import ( "testing" "time" + "github.com/ElrondNetwork/elrond-go/node/heartbeat" + "github.com/ElrondNetwork/elrond-go/node/mock" "github.com/stretchr/testify/assert" ) -//------ newHeartbeatMessageInfo +//------- newHeartbeatMessageInfo + func TestNewHeartbeatMessageInfo_InvalidDurationShouldErr(t *testing.T) { t.Parallel() - hbmi, err := newHeartbeatMessageInfo(0, false) + hbmi, err := heartbeat.NewHeartbeatMessageInfo( + 0, + false, + time.Time{}, + &mock.MockTimer{}, + ) assert.Nil(t, hbmi) - assert.Equal(t, ErrInvalidMaxDurationPeerUnresponsive, err) + assert.Equal(t, heartbeat.ErrInvalidMaxDurationPeerUnresponsive, err) +} + +func TestNewHeartbeatMessageInfo_NilGetTimeHandlerShouldErr(t *testing.T) { + t.Parallel() + + hbmi, err := heartbeat.NewHeartbeatMessageInfo( + 1, + false, + time.Time{}, + nil, + ) + + assert.Nil(t, hbmi) + assert.Equal(t, heartbeat.ErrNilTimer, err) } func TestNewHeartbeatMessageInfo_OkValsShouldWork(t *testing.T) { t.Parallel() - hbmi, err := newHeartbeatMessageInfo(1, false) + hbmi, err := heartbeat.NewHeartbeatMessageInfo( + 1, + false, + time.Time{}, + &mock.MockTimer{}, + ) assert.NotNil(t, hbmi) assert.Nil(t, err) } +//------- HeartbeatReceived + func TestHeartbeatMessageInfo_HeartbeatReceivedShouldUpdate(t *testing.T) { t.Parallel() - hbmi, _ := newHeartbeatMessageInfo(time.Duration(10), false) - incrementalTime := int64(0) - hbmi.getTimeHandler = func() time.Time { - if incrementalTime < 2 { - incrementalTime++ - } - return time.Unix(0, incrementalTime) - } + mockTimer := &mock.MockTimer{} + genesisTime := mockTimer.Now() + + hbmi, _ := heartbeat.NewHeartbeatMessageInfo( + 10*time.Second, + false, + genesisTime, + mockTimer, + ) - assert.Equal(t, emptyTimestamp, hbmi.timeStamp) + assert.Equal(t, genesisTime, hbmi.GetTimeStamp()) + mockTimer.IncrementSeconds(1) + + expectedTime := time.Unix(1, 0) hbmi.HeartbeatReceived(uint32(0), uint32(0), "v0.1", "undefined") - assert.NotEqual(t, emptyTimestamp, hbmi.timeStamp) - assert.Equal(t, uint32(0), hbmi.receivedShardID) + assert.Equal(t, expectedTime, hbmi.GetTimeStamp()) + assert.Equal(t, uint32(0), hbmi.GetReceiverShardId()) + mockTimer.IncrementSeconds(1) + expectedTime = time.Unix(2, 0) 
hbmi.HeartbeatReceived(uint32(0), uint32(1), "v0.1", "undefined") - assert.NotEqual(t, emptyTimestamp, hbmi.timeStamp) - assert.Equal(t, uint32(1), hbmi.receivedShardID) + assert.Equal(t, expectedTime, hbmi.GetTimeStamp()) + assert.Equal(t, uint32(1), hbmi.GetReceiverShardId()) } func TestHeartbeatMessageInfo_HeartbeatUpdateFieldsShouldWork(t *testing.T) { t.Parallel() - hbmi, _ := newHeartbeatMessageInfo(time.Duration(1), false) - incrementalTime := int64(0) - hbmi.getTimeHandler = func() time.Time { - tReturned := time.Unix(0, incrementalTime) - incrementalTime += 10 + mockTimer := &mock.MockTimer{} + genesisTime := mockTimer.Now() + hbmi, _ := heartbeat.NewHeartbeatMessageInfo( + 100*time.Second, + false, + genesisTime, + mockTimer, + ) - return tReturned - } + assert.Equal(t, genesisTime, hbmi.GetTimeStamp()) - assert.Equal(t, emptyTimestamp, hbmi.timeStamp) + mockTimer.IncrementSeconds(1) + expectedTime := time.Unix(1, 0) + expectedUptime := time.Duration(0) + expectedDownTime := time.Duration(1 * time.Second) hbmi.HeartbeatReceived(uint32(0), uint32(3), "v0.1", "undefined") - assert.NotEqual(t, emptyTimestamp, hbmi.timeStamp) + assert.Equal(t, expectedTime, hbmi.GetTimeStamp()) + assert.Equal(t, true, hbmi.GetIsActive()) + assert.Equal(t, expectedUptime, hbmi.GetTotalUpTime().Duration) + assert.Equal(t, expectedDownTime, hbmi.GetTotalDownTime().Duration) } -func TestHeartbeatMessageInfo_HeartbeatShouldUpdateUpTime(t *testing.T) { +func TestHeartbeatMessageInfo_HeartbeatShouldUpdateUpDownTime(t *testing.T) { t.Parallel() - hbmi, _ := newHeartbeatMessageInfo(time.Duration(10), false) - incrementalTime := int64(0) - hbmi.getTimeHandler = func() time.Time { - tReturned := time.Unix(0, incrementalTime) - incrementalTime += 1 + mockTimer := &mock.MockTimer{} + genesisTime := mockTimer.Now() + hbmi, _ := heartbeat.NewHeartbeatMessageInfo( + 100*time.Second, + false, + genesisTime, + mockTimer, + ) + + assert.Equal(t, genesisTime, hbmi.GetTimeStamp()) - return tReturned - } + // send heartbeat twice in order to calculate the duration between thm + mockTimer.IncrementSeconds(1) + hbmi.HeartbeatReceived(uint32(0), uint32(1), "v0.1", "undefined") + mockTimer.IncrementSeconds(1) + hbmi.HeartbeatReceived(uint32(0), uint32(2), "v0.1", "undefined") + + expectedDownDuration := time.Duration(1 * time.Second) + expectedUpDuration := time.Duration(1 * time.Second) + assert.Equal(t, expectedUpDuration, hbmi.GetTotalUpTime().Duration) + assert.Equal(t, expectedDownDuration, hbmi.GetTotalDownTime().Duration) + expectedTime := time.Unix(2, 0) + assert.Equal(t, expectedTime, hbmi.GetTimeStamp()) +} + +func TestHeartbeatMessageInfo_HeartbeatLongerDurationThanMaxShouldUpdateDownTime(t *testing.T) { + t.Parallel() - assert.Equal(t, emptyTimestamp, hbmi.timeStamp) + mockTimer := &mock.MockTimer{} + genesisTime := mockTimer.Now() + hbmi, _ := heartbeat.NewHeartbeatMessageInfo( + 500*time.Millisecond, + false, + genesisTime, + mockTimer, + ) + + assert.Equal(t, genesisTime, hbmi.GetTimeStamp()) // send heartbeat twice in order to calculate the duration between thm + mockTimer.IncrementSeconds(1) hbmi.HeartbeatReceived(uint32(0), uint32(1), "v0.1", "undefined") + mockTimer.IncrementSeconds(1) hbmi.HeartbeatReceived(uint32(0), uint32(2), "v0.1", "undefined") - assert.True(t, hbmi.totalUpTime.Duration > time.Duration(0)) - assert.NotEqual(t, emptyTimestamp, hbmi.timeStamp) + expectedDownDuration := time.Duration(2 * time.Second) + expectedUpDuration := time.Duration(0) + assert.Equal(t, expectedDownDuration, 
hbmi.GetTotalDownTime().Duration) + assert.Equal(t, expectedUpDuration, hbmi.GetTotalUpTime().Duration) + expectedTime := time.Unix(2, 0) + assert.Equal(t, expectedTime, hbmi.GetTimeStamp()) +} + +func TestHeartbeatMessageInfo_HeartbeatBeforeGenesisShouldNotUpdateUpDownTime(t *testing.T) { + t.Parallel() + + mockTimer := &mock.MockTimer{} + genesisTime := time.Unix(5, 0) + hbmi, _ := heartbeat.NewHeartbeatMessageInfo( + 100*time.Second, + false, + genesisTime, + mockTimer, + ) + + assert.Equal(t, genesisTime, hbmi.GetTimeStamp()) + + // send heartbeat twice in order to calculate the duration between thm + mockTimer.IncrementSeconds(1) + hbmi.HeartbeatReceived(uint32(0), uint32(1), "v0.1", "undefined") + mockTimer.IncrementSeconds(1) + hbmi.HeartbeatReceived(uint32(0), uint32(2), "v0.1", "undefined") + + expectedDuration := time.Duration(0) + assert.Equal(t, expectedDuration, hbmi.GetTotalDownTime().Duration) + assert.Equal(t, expectedDuration, hbmi.GetTotalUpTime().Duration) + expectedTime := time.Unix(2, 0) + assert.Equal(t, expectedTime, hbmi.GetTimeStamp()) +} + +func TestHeartbeatMessageInfo_HeartbeatEqualGenesisShouldHaveUpDownTimeZero(t *testing.T) { + t.Parallel() + + mockTimer := &mock.MockTimer{} + genesisTime := time.Unix(1, 0) + hbmi, _ := heartbeat.NewHeartbeatMessageInfo( + 100*time.Second, + false, + genesisTime, + mockTimer, + ) + + assert.Equal(t, genesisTime, hbmi.GetTimeStamp()) + mockTimer.IncrementSeconds(1) + hbmi.HeartbeatReceived(uint32(0), uint32(1), "v0.1", "undefined") + + expectedDuration := time.Duration(0) + assert.Equal(t, expectedDuration, hbmi.GetTotalUpTime().Duration) + assert.Equal(t, expectedDuration, hbmi.GetTotalDownTime().Duration) + expectedTime := time.Unix(1, 0) + assert.Equal(t, expectedTime, hbmi.GetTimeStamp()) } diff --git a/node/heartbeat/heartbeat.go b/node/heartbeat/heartbeat.go index 4b549e57d8f..e8f6ca23b32 100644 --- a/node/heartbeat/heartbeat.go +++ b/node/heartbeat/heartbeat.go @@ -22,9 +22,26 @@ type PubKeyHeartbeat struct { IsActive bool `json:"isActive"` ReceivedShardID uint32 `json:"receivedShardID"` ComputedShardID uint32 `json:"computedShardID"` - TotalUpTime Duration `json:"totalUpTime"` - TotalDownTime Duration `json:"totalDownTime"` + TotalUpTime int `json:"totalUpTimeSec"` + TotalDownTime int `json:"totalDownTimeSec"` VersionNumber string `json:"versionNumber"` IsValidator bool `json:"isValidator"` NodeDisplayName string `json:"nodeDisplayName"` } + +// HeartbeatDTO is the struct used for handling DB operations for heartbeatMessageInfo struct +type HeartbeatDTO struct { + MaxDurationPeerUnresponsive time.Duration + MaxInactiveTime Duration + TotalUpTime Duration + TotalDownTime Duration + TimeStamp time.Time + IsActive bool + ReceivedShardID uint32 + ComputedShardID uint32 + VersionNumber string + NodeDisplayName string + IsValidator bool + LastUptimeDowntime time.Time + GenesisTime time.Time +} diff --git a/node/heartbeat/interface.go b/node/heartbeat/interface.go index 12833c36901..550e6914965 100644 --- a/node/heartbeat/interface.go +++ b/node/heartbeat/interface.go @@ -1,7 +1,36 @@ package heartbeat +import ( + "time" + + "github.com/ElrondNetwork/elrond-go/p2p" +) + // PeerMessenger defines a subset of the p2p.Messenger interface type PeerMessenger interface { Broadcast(topic string, buff []byte) IsInterfaceNil() bool } + +// MessageHandler defines what a message processor for heartbeat should do +type MessageHandler interface { + CreateHeartbeatFromP2pMessage(message p2p.MessageP2P) (*Heartbeat, error) + IsInterfaceNil() 
bool +} + +//Timer defines an interface for tracking time +type Timer interface { + Now() time.Time + IsInterfaceNil() bool +} + +// HeartbeatStorageHandler defines what a heartbeat's storer should do +type HeartbeatStorageHandler interface { + LoadGenesisTime() (time.Time, error) + UpdateGenesisTime(genesisTime time.Time) error + LoadHbmiDTO(pubKey string) (*HeartbeatDTO, error) + SavePubkeyData(pubkey []byte, heartbeat *HeartbeatDTO) error + LoadKeys() ([][]byte, error) + SaveKeys(peersSlice [][]byte) error + IsInterfaceNil() bool +} diff --git a/node/heartbeat/messageProcessor.go b/node/heartbeat/messageProcessor.go new file mode 100644 index 00000000000..4c21686bcc7 --- /dev/null +++ b/node/heartbeat/messageProcessor.go @@ -0,0 +1,86 @@ +package heartbeat + +import ( + "github.com/ElrondNetwork/elrond-go/crypto" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/p2p" +) + +// MessageProcessor is the struct that will handle heartbeat message verifications and conversion between +// heartbeatMessageInfo and HeartbeatDTO +type MessageProcessor struct { + singleSigner crypto.SingleSigner + keygen crypto.KeyGenerator + marshalizer marshal.Marshalizer +} + +// NewMessageProcessor will return a new instance of MessageProcessor +func NewMessageProcessor( + singleSigner crypto.SingleSigner, + keygen crypto.KeyGenerator, + marshalizer marshal.Marshalizer, +) (*MessageProcessor, error) { + if singleSigner == nil || singleSigner.IsInterfaceNil() { + return nil, ErrNilSingleSigner + } + if keygen == nil || keygen.IsInterfaceNil() { + return nil, ErrNilKeyGenerator + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, ErrNilMarshalizer + } + + return &MessageProcessor{ + singleSigner: singleSigner, + keygen: keygen, + marshalizer: marshalizer, + }, nil +} + +// CreateHeartbeatFromP2pMessage will return a heartbeat if all the checks pass +func (mp *MessageProcessor) CreateHeartbeatFromP2pMessage(message p2p.MessageP2P) (*Heartbeat, error) { + if message == nil || message.IsInterfaceNil() { + return nil, ErrNilMessage + } + if message.Data() == nil { + return nil, ErrNilDataToProcess + } + + hbRecv := &Heartbeat{} + + err := mp.marshalizer.Unmarshal(hbRecv, message.Data()) + if err != nil { + return nil, err + } + + err = mp.verifySignature(hbRecv) + if err != nil { + return nil, err + } + + return hbRecv, nil +} + +func (mp *MessageProcessor) verifySignature(hbRecv *Heartbeat) error { + senderPubKey, err := mp.keygen.PublicKeyFromByteArray(hbRecv.Pubkey) + if err != nil { + return err + } + + copiedHeartbeat := *hbRecv + copiedHeartbeat.Signature = nil + buffCopiedHeartbeat, err := mp.marshalizer.Marshal(copiedHeartbeat) + if err != nil { + return err + } + + return mp.singleSigner.Verify(senderPubKey, buffCopiedHeartbeat, hbRecv.Signature) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (mp *MessageProcessor) IsInterfaceNil() bool { + if mp == nil { + return true + } + return false +} diff --git a/node/heartbeat/monitor.go b/node/heartbeat/monitor.go index edb941c3083..153d9676b0b 100644 --- a/node/heartbeat/monitor.go +++ b/node/heartbeat/monitor.go @@ -1,7 +1,9 @@ package heartbeat import ( + "bytes" "encoding/hex" + "fmt" "sort" "strings" "sync" @@ -9,7 +11,6 @@ import ( "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/p2p" 
"github.com/ElrondNetwork/elrond-go/statusHandler" @@ -19,64 +20,133 @@ var log = logger.DefaultLogger() // Monitor represents the heartbeat component that processes received heartbeat messages type Monitor struct { - singleSigner crypto.SingleSigner maxDurationPeerUnresponsive time.Duration - keygen crypto.KeyGenerator marshalizer marshal.Marshalizer heartbeatMessages map[string]*heartbeatMessageInfo mutHeartbeatMessages sync.RWMutex pubKeysMap map[uint32][]string + fullPeersSlice [][]byte mutPubKeysMap sync.RWMutex appStatusHandler core.AppStatusHandler + genesisTime time.Time + messageHandler MessageHandler + storer HeartbeatStorageHandler + timer Timer } // NewMonitor returns a new monitor instance func NewMonitor( - singleSigner crypto.SingleSigner, - keygen crypto.KeyGenerator, marshalizer marshal.Marshalizer, maxDurationPeerUnresponsive time.Duration, pubKeysMap map[uint32][]string, + genesisTime time.Time, + messageHandler MessageHandler, + storer HeartbeatStorageHandler, + timer Timer, ) (*Monitor, error) { - if singleSigner == nil || singleSigner.IsInterfaceNil() { - return nil, ErrNilSingleSigner - } - if keygen == nil || keygen.IsInterfaceNil() { - return nil, ErrNilKeyGenerator - } if marshalizer == nil || marshalizer.IsInterfaceNil() { return nil, ErrNilMarshalizer } if len(pubKeysMap) == 0 { return nil, ErrEmptyPublicKeysMap } - - pubKeysMapCopy := make(map[uint32][]string, 0) + if messageHandler == nil || messageHandler.IsInterfaceNil() { + return nil, ErrNilMessageHandler + } + if storer == nil || storer.IsInterfaceNil() { + return nil, ErrNilHeartbeatStorer + } + if timer == nil || timer.IsInterfaceNil() { + return nil, ErrNilTimer + } mon := &Monitor{ - singleSigner: singleSigner, - keygen: keygen, marshalizer: marshalizer, heartbeatMessages: make(map[string]*heartbeatMessageInfo), maxDurationPeerUnresponsive: maxDurationPeerUnresponsive, appStatusHandler: &statusHandler.NilStatusHandler{}, + genesisTime: genesisTime, + messageHandler: messageHandler, + storer: storer, + timer: timer, + } + + err := mon.storer.UpdateGenesisTime(genesisTime) + if err != nil { + return nil, err } + err = mon.initializeHeartbeatMessagesInfo(pubKeysMap) + if err != nil { + return nil, err + } + + err = mon.loadRestOfPubKeysFromStorage() + if err != nil { + log.Debug(fmt.Sprintf("heartbeat can't load public keys from storage: %s", err.Error())) + } + + return mon, nil +} + +func (m *Monitor) initializeHeartbeatMessagesInfo(pubKeysMap map[uint32][]string) error { + pubKeysMapCopy := make(map[uint32][]string, 0) for shardId, pubKeys := range pubKeysMap { for _, pubkey := range pubKeys { + err := m.loadHbmiFromStorer(pubkey) + if err != nil { // if pubKey not found in DB, create a new instance + mhbi, errNewHbmi := newHeartbeatMessageInfo(m.maxDurationPeerUnresponsive, true, m.genesisTime, m.timer) + if errNewHbmi != nil { + return errNewHbmi + } + + mhbi.genesisTime = m.genesisTime + mhbi.computedShardID = shardId + m.heartbeatMessages[pubkey] = mhbi + } pubKeysMapCopy[shardId] = append(pubKeysMapCopy[shardId], pubkey) - mhbi, err := newHeartbeatMessageInfo(maxDurationPeerUnresponsive, true) + } + } + + m.pubKeysMap = pubKeysMapCopy + return nil +} + +func (m *Monitor) loadRestOfPubKeysFromStorage() error { + peersSlice, err := m.storer.LoadKeys() + if err != nil { + return err + } + + for _, peer := range peersSlice { + _, ok := m.heartbeatMessages[string(peer)] + if !ok { // peer not in nodes map + err = m.loadHbmiFromStorer(string(peer)) if err != nil { - return nil, err + continue } - - 
mhbi.computedShardID = shardId - mon.heartbeatMessages[pubkey] = mhbi } } - mon.pubKeysMap = pubKeysMapCopy - return mon, nil + + return nil +} + +func (m *Monitor) loadHbmiFromStorer(pubKey string) error { + hbmiDTO, err := m.storer.LoadHbmiDTO(pubKey) + if err != nil { + return err + } + + receivedHbmi := m.convertFromExportedStruct(*hbmiDTO, m.maxDurationPeerUnresponsive) + receivedHbmi.getTimeHandler = m.timer.Now + receivedHbmi.isActive = m.timer.Now().Sub(receivedHbmi.lastUptimeDowntime) <= m.maxDurationPeerUnresponsive + receivedHbmi.lastUptimeDowntime = m.timer.Now() + receivedHbmi.genesisTime = m.genesisTime + + m.heartbeatMessages[pubKey] = &receivedHbmi + + return nil } // SetAppStatusHandler will set the AppStatusHandler which will be used for monitoring @@ -92,46 +162,67 @@ func (m *Monitor) SetAppStatusHandler(ash core.AppStatusHandler) error { // ProcessReceivedMessage satisfies the p2p.MessageProcessor interface so it can be called // by the p2p subsystem each time a new heartbeat message arrives func (m *Monitor) ProcessReceivedMessage(message p2p.MessageP2P) error { - if message == nil || message.IsInterfaceNil() { - return ErrNilMessage - } - if message.Data() == nil { - return ErrNilDataToProcess - } - - hbRecv := &Heartbeat{} - - err := m.marshalizer.Unmarshal(hbRecv, message.Data()) - if err != nil { - return err - } - - err = m.verifySignature(hbRecv) + hbRecv, err := m.messageHandler.CreateHeartbeatFromP2pMessage(message) if err != nil { return err } //message is validated, process should be done async, method can return nil - go func(msg p2p.MessageP2P, hb *Heartbeat) { + go m.addHeartbeatMessageToMap(hbRecv) + go func() { m.mutHeartbeatMessages.Lock() defer m.mutHeartbeatMessages.Unlock() - pe := m.heartbeatMessages[string(hb.Pubkey)] - if pe == nil { - pe, err = newHeartbeatMessageInfo(m.maxDurationPeerUnresponsive, false) - if err != nil { - log.Error(err.Error()) - return - } - m.heartbeatMessages[string(hb.Pubkey)] = pe + m.computeAllHeartbeatMessages() + }() + + return nil +} + +func (m *Monitor) addHeartbeatMessageToMap(hb *Heartbeat) { + m.mutHeartbeatMessages.Lock() + defer m.mutHeartbeatMessages.Unlock() + + pubKeyStr := string(hb.Pubkey) + hbmi, ok := m.heartbeatMessages[pubKeyStr] + if hbmi == nil || !ok { + var err error + hbmi, err = newHeartbeatMessageInfo(m.maxDurationPeerUnresponsive, false, m.genesisTime, m.timer) + if err != nil { + log.Error(err.Error()) + return } + m.heartbeatMessages[pubKeyStr] = hbmi + } - computedShardID := m.computeShardID(string(hb.Pubkey)) - pe.HeartbeatReceived(computedShardID, hb.ShardID, hb.VersionNumber, hb.NodeDisplayName) - m.updateAllHeartbeatMessages() - }(message, hbRecv) + computedShardID := m.computeShardID(pubKeyStr) + hbmi.HeartbeatReceived(computedShardID, hb.ShardID, hb.VersionNumber, hb.NodeDisplayName) + hbDTO := m.convertToExportedStruct(hbmi) + err := m.storer.SavePubkeyData(hb.Pubkey, &hbDTO) + if err != nil { + log.Error(fmt.Sprintf("cannot save heartbeat to db: %s", err.Error())) + } + m.addPeerToFullPeersSlice(hb.Pubkey) +} - return nil +func (m *Monitor) addPeerToFullPeersSlice(pubKey []byte) { + if !m.isPeerInFullPeersSlice(pubKey) { + m.fullPeersSlice = append(m.fullPeersSlice, pubKey) + err := m.storer.SaveKeys(m.fullPeersSlice) + if err != nil { + log.Error(fmt.Sprintf("can't store the keys slice: %s", err.Error())) + } + } +} + +func (m *Monitor) isPeerInFullPeersSlice(pubKey []byte) bool { + for _, peer := range m.fullPeersSlice { + if bytes.Equal(peer, pubKey) { + return true + } + } + + 
return false } func (m *Monitor) computeShardID(pubkey string) uint32 { @@ -151,27 +242,11 @@ func (m *Monitor) computeShardID(pubkey string) uint32 { return m.heartbeatMessages[pubkey].computedShardID } -func (m *Monitor) verifySignature(hbRecv *Heartbeat) error { - senderPubKey, err := m.keygen.PublicKeyFromByteArray(hbRecv.Pubkey) - if err != nil { - return err - } - - copiedHeartbeat := *hbRecv - copiedHeartbeat.Signature = nil - buffCopiedHeartbeat, err := m.marshalizer.Marshal(copiedHeartbeat) - if err != nil { - return err - } - - return m.singleSigner.Verify(senderPubKey, buffCopiedHeartbeat, hbRecv.Signature) -} - -func (m *Monitor) updateAllHeartbeatMessages() { +func (m *Monitor) computeAllHeartbeatMessages() { counterActiveValidators := 0 counterConnectedNodes := 0 for _, v := range m.heartbeatMessages { - v.updateFields() + v.computeActive(m.timer.Now()) if v.isActive { counterConnectedNodes++ @@ -188,9 +263,11 @@ func (m *Monitor) updateAllHeartbeatMessages() { // GetHeartbeats returns the heartbeat status func (m *Monitor) GetHeartbeats() []PubKeyHeartbeat { - m.mutHeartbeatMessages.RLock() + m.mutHeartbeatMessages.Lock() status := make([]PubKeyHeartbeat, len(m.heartbeatMessages)) + m.computeAllHeartbeatMessages() + idx := 0 for k, v := range m.heartbeatMessages { status[idx] = PubKeyHeartbeat{ @@ -200,16 +277,15 @@ func (m *Monitor) GetHeartbeats() []PubKeyHeartbeat { IsActive: v.isActive, ReceivedShardID: v.receivedShardID, ComputedShardID: v.computedShardID, - TotalUpTime: v.totalUpTime, - TotalDownTime: v.totalDownTime, + TotalUpTime: int(v.totalUpTime.Seconds()), + TotalDownTime: int(v.totalDownTime.Seconds()), VersionNumber: v.versionNumber, IsValidator: v.isValidator, NodeDisplayName: v.nodeDisplayName, } idx++ - } - m.mutHeartbeatMessages.RUnlock() + m.mutHeartbeatMessages.Unlock() sort.Slice(status, func(i, j int) bool { return strings.Compare(status[i].HexPublicKey, status[j].HexPublicKey) < 0 @@ -225,3 +301,40 @@ func (m *Monitor) IsInterfaceNil() bool { } return false } + +func (m *Monitor) convertToExportedStruct(v *heartbeatMessageInfo) HeartbeatDTO { + return HeartbeatDTO{ + TimeStamp: v.timeStamp, + MaxInactiveTime: v.maxInactiveTime, + IsActive: v.isActive, + ReceivedShardID: v.receivedShardID, + ComputedShardID: v.computedShardID, + TotalUpTime: v.totalUpTime, + TotalDownTime: v.totalDownTime, + VersionNumber: v.versionNumber, + IsValidator: v.isValidator, + NodeDisplayName: v.nodeDisplayName, + LastUptimeDowntime: v.lastUptimeDowntime, + GenesisTime: v.genesisTime, + } +} + +func (m *Monitor) convertFromExportedStruct(hbDTO HeartbeatDTO, maxDuration time.Duration) heartbeatMessageInfo { + hbmi := heartbeatMessageInfo{ + maxDurationPeerUnresponsive: maxDuration, + maxInactiveTime: hbDTO.MaxInactiveTime, + timeStamp: hbDTO.TimeStamp, + isActive: hbDTO.IsActive, + totalUpTime: hbDTO.TotalUpTime, + totalDownTime: hbDTO.TotalDownTime, + receivedShardID: hbDTO.ReceivedShardID, + computedShardID: hbDTO.ComputedShardID, + versionNumber: hbDTO.VersionNumber, + nodeDisplayName: hbDTO.NodeDisplayName, + isValidator: hbDTO.IsValidator, + lastUptimeDowntime: hbDTO.LastUptimeDowntime, + genesisTime: hbDTO.GenesisTime, + } + + return hbmi +} diff --git a/node/heartbeat/monitor_test.go b/node/heartbeat/monitor_test.go index e5f9fa13ae8..a5e033c8530 100644 --- a/node/heartbeat/monitor_test.go +++ b/node/heartbeat/monitor_test.go @@ -7,83 +7,129 @@ import ( "testing" "time" - "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/node/heartbeat" + 
"github.com/ElrondNetwork/elrond-go/node/heartbeat/storage" "github.com/ElrondNetwork/elrond-go/node/mock" + "github.com/ElrondNetwork/elrond-go/p2p" "github.com/stretchr/testify/assert" ) //------- NewMonitor -func TestNewMonitor_NilSingleSignerShouldErr(t *testing.T) { +func TestNewMonitor_NilMarshalizerShouldErr(t *testing.T) { t.Parallel() + th := &mock.MockTimer{} mon, err := heartbeat.NewMonitor( nil, - &mock.KeyGenMock{}, - &mock.MarshalizerMock{}, 0, map[uint32][]string{0: {""}}, + time.Now(), + &mock.MessageHandlerStub{}, + &mock.HeartbeatStorerStub{}, + th, ) assert.Nil(t, mon) - assert.Equal(t, heartbeat.ErrNilSingleSigner, err) + assert.Equal(t, heartbeat.ErrNilMarshalizer, err) } -func TestNewMonitor_NilKeygenShouldErr(t *testing.T) { +func TestNewMonitor_EmptyPublicKeyListShouldErr(t *testing.T) { t.Parallel() + th := &mock.MockTimer{} mon, err := heartbeat.NewMonitor( - &mock.SinglesignMock{}, - nil, &mock.MarshalizerMock{}, 0, - map[uint32][]string{0: {""}}, + make(map[uint32][]string), + time.Now(), + &mock.MessageHandlerStub{}, + &mock.HeartbeatStorerStub{}, + th, ) assert.Nil(t, mon) - assert.Equal(t, heartbeat.ErrNilKeyGenerator, err) + assert.Equal(t, heartbeat.ErrEmptyPublicKeysMap, err) } -func TestNewMonitor_NilMarshalizerShouldErr(t *testing.T) { +func TestNewMonitor_NilMessageHandlerShouldErr(t *testing.T) { t.Parallel() + th := &mock.MockTimer{} mon, err := heartbeat.NewMonitor( - &mock.SinglesignMock{}, - &mock.KeyGenMock{}, + &mock.MarshalizerMock{}, + 0, + map[uint32][]string{0: {""}}, + time.Now(), nil, + &mock.HeartbeatStorerStub{}, + th, + ) + + assert.Nil(t, mon) + assert.Equal(t, heartbeat.ErrNilMessageHandler, err) +} + +func TestNewMonitor_NilHeartbeatStorerShouldErr(t *testing.T) { + t.Parallel() + + th := &mock.MockTimer{} + mon, err := heartbeat.NewMonitor( + &mock.MarshalizerMock{}, 0, map[uint32][]string{0: {""}}, + time.Now(), + &mock.MessageHandlerStub{}, + nil, + th, ) assert.Nil(t, mon) - assert.Equal(t, heartbeat.ErrNilMarshalizer, err) + assert.Equal(t, heartbeat.ErrNilHeartbeatStorer, err) } -func TestNewMonitor_EmptyPublicKeyListShouldErr(t *testing.T) { +func TestNewMonitor_NilTimeHandlerShouldErr(t *testing.T) { t.Parallel() mon, err := heartbeat.NewMonitor( - &mock.SinglesignMock{}, - &mock.KeyGenMock{}, &mock.MarshalizerMock{}, 0, - make(map[uint32][]string), + map[uint32][]string{0: {""}}, + time.Now(), + &mock.MessageHandlerStub{}, + &mock.HeartbeatStorerStub{}, + nil, ) assert.Nil(t, mon) - assert.Equal(t, heartbeat.ErrEmptyPublicKeysMap, err) + assert.Equal(t, heartbeat.ErrNilTimer, err) } func TestNewMonitor_OkValsShouldCreatePubkeyMap(t *testing.T) { t.Parallel() + th := &mock.MockTimer{} mon, err := heartbeat.NewMonitor( - &mock.SinglesignMock{}, - &mock.KeyGenMock{}, &mock.MarshalizerMock{}, 1, map[uint32][]string{0: {"pk1", "pk2"}}, + time.Now(), + &mock.MessageHandlerStub{}, + &mock.HeartbeatStorerStub{ + UpdateGenesisTimeCalled: func(genesisTime time.Time) error { + return nil + }, + LoadHbmiDTOCalled: func(pubKey string) (*heartbeat.HeartbeatDTO, error) { + return nil, errors.New("not found") + }, + LoadKeysCalled: func() ([][]byte, error) { + return nil, nil + }, + SavePubkeyDataCalled: func(pubkey []byte, heartbeat *heartbeat.HeartbeatDTO) error { + return nil + }, + }, + th, ) assert.NotNil(t, mon) @@ -95,6 +141,7 @@ func TestNewMonitor_OkValsShouldCreatePubkeyMap(t *testing.T) { func TestNewMonitor_ShouldComputeShardId(t *testing.T) { t.Parallel() + th := &mock.MockTimer{} pksPerShards := map[uint32][]string{ 0: {"pk0"}, 
1: {"pk1"}, @@ -102,11 +149,26 @@ func TestNewMonitor_ShouldComputeShardId(t *testing.T) { maxDuration := time.Millisecond mon, err := heartbeat.NewMonitor( - &mock.SinglesignMock{}, - &mock.KeyGenMock{}, &mock.MarshalizerMock{}, maxDuration, pksPerShards, + time.Now(), + &mock.MessageHandlerStub{}, + &mock.HeartbeatStorerStub{ + UpdateGenesisTimeCalled: func(genesisTime time.Time) error { + return nil + }, + LoadHbmiDTOCalled: func(pubKey string) (*heartbeat.HeartbeatDTO, error) { + return nil, errors.New("not found") + }, + LoadKeysCalled: func() ([][]byte, error) { + return nil, nil + }, + SavePubkeyDataCalled: func(pubkey []byte, heartbeat *heartbeat.HeartbeatDTO) error { + return nil + }, + }, + th, ) assert.NotNil(t, mon) @@ -119,143 +181,54 @@ func TestNewMonitor_ShouldComputeShardId(t *testing.T) { //------- ProcessReceivedMessage -func TestMonitor_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { - t.Parallel() - - mon, _ := heartbeat.NewMonitor( - &mock.SinglesignMock{}, - &mock.KeyGenMock{}, - &mock.MarshalizerMock{}, - 1, - map[uint32][]string{0: {"pk1"}}, - ) - - err := mon.ProcessReceivedMessage(nil) - - assert.Equal(t, heartbeat.ErrNilMessage, err) -} - -func TestMonitor_ProcessReceivedMessageNilDataShouldErr(t *testing.T) { - t.Parallel() - - mon, _ := heartbeat.NewMonitor( - &mock.SinglesignMock{}, - &mock.KeyGenMock{}, - &mock.MarshalizerMock{}, - 0, - map[uint32][]string{0: {"pk1"}}, - ) - - err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{}) - - assert.Equal(t, heartbeat.ErrNilDataToProcess, err) -} - -func TestMonitor_ProcessReceivedMessageMarshalFailsShouldErr(t *testing.T) { +func TestMonitor_ProcessReceivedMessageShouldWork(t *testing.T) { t.Parallel() - errExpected := errors.New("expected err") + pubKey := "pk1" + th := &mock.MockTimer{} mon, _ := heartbeat.NewMonitor( - &mock.SinglesignMock{}, - &mock.KeyGenMock{}, &mock.MarshalizerMock{ UnmarshalHandler: func(obj interface{}, buff []byte) error { - return errExpected + (obj.(*heartbeat.Heartbeat)).Pubkey = []byte(pubKey) + return nil }, }, - 1, - map[uint32][]string{0: {"pk1"}}, - ) - - err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: []byte("")}) - - assert.Equal(t, errExpected, err) -} - -func TestMonitor_ProcessReceivedMessageWrongPubkeyShouldErr(t *testing.T) { - t.Parallel() - - errExpected := errors.New("expected err") - - mon, _ := heartbeat.NewMonitor( - &mock.SinglesignMock{}, - &mock.KeyGenMock{ - PublicKeyFromByteArrayMock: func(b []byte) (key crypto.PublicKey, e error) { - return nil, errExpected + time.Second*1000, + map[uint32][]string{0: {pubKey}}, + time.Now(), + &mock.MessageHandlerStub{ + CreateHeartbeatFromP2pMessageCalled: func(message p2p.MessageP2P) (*heartbeat.Heartbeat, error) { + var rcvHb heartbeat.Heartbeat + _ = json.Unmarshal(message.Data(), &rcvHb) + return &rcvHb, nil }, }, - &mock.MarshalizerMock{ - UnmarshalHandler: func(obj interface{}, buff []byte) error { + &mock.HeartbeatStorerStub{ + UpdateGenesisTimeCalled: func(genesisTime time.Time) error { return nil }, - }, - 1, - map[uint32][]string{0: {"pk1"}}, - ) - - err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: []byte("")}) - - assert.Equal(t, errExpected, err) -} - -func TestMonitor_ProcessReceivedMessageVerifyFailsShouldErr(t *testing.T) { - t.Parallel() - - errExpected := errors.New("expected err") - - mon, _ := heartbeat.NewMonitor( - &mock.SinglesignStub{ - VerifyCalled: func(public crypto.PublicKey, msg []byte, sig []byte) error { - return errExpected + LoadHbmiDTOCalled: 
func(pubKey string) (*heartbeat.HeartbeatDTO, error) { + return nil, errors.New("not found") }, - }, - &mock.KeyGenMock{ - PublicKeyFromByteArrayMock: func(b []byte) (key crypto.PublicKey, e error) { + LoadKeysCalled: func() ([][]byte, error) { return nil, nil }, - }, - &mock.MarshalizerMock{ - UnmarshalHandler: func(obj interface{}, buff []byte) error { - return nil - }, - }, - 1, - map[uint32][]string{0: {"pk1"}}, - ) - - err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: []byte("")}) - - assert.Equal(t, errExpected, err) -} - -func TestMonitor_ProcessReceivedMessageShouldWork(t *testing.T) { - t.Parallel() - - pubKey := "pk1" - - mon, _ := heartbeat.NewMonitor( - &mock.SinglesignStub{ - VerifyCalled: func(public crypto.PublicKey, msg []byte, sig []byte) error { + SavePubkeyDataCalled: func(pubkey []byte, heartbeat *heartbeat.HeartbeatDTO) error { return nil }, - }, - &mock.KeyGenMock{ - PublicKeyFromByteArrayMock: func(b []byte) (key crypto.PublicKey, e error) { - return nil, nil - }, - }, - &mock.MarshalizerMock{ - UnmarshalHandler: func(obj interface{}, buff []byte) error { - (obj.(*heartbeat.Heartbeat)).Pubkey = []byte(pubKey) + SaveKeysCalled: func(peersSlice [][]byte) error { return nil }, }, - time.Second*1000, - map[uint32][]string{0: {pubKey}}, + th, ) - err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: []byte("")}) + hb := heartbeat.Heartbeat{ + Pubkey: []byte(pubKey), + } + hbBytes, _ := json.Marshal(hb) + err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: hbBytes}) assert.Nil(t, err) //a delay is mandatory for the go routine to finish its job @@ -271,17 +244,8 @@ func TestMonitor_ProcessReceivedMessageWithNewPublicKey(t *testing.T) { pubKey := "pk1" + th := &mock.MockTimer{} mon, _ := heartbeat.NewMonitor( - &mock.SinglesignStub{ - VerifyCalled: func(public crypto.PublicKey, msg []byte, sig []byte) error { - return nil - }, - }, - &mock.KeyGenMock{ - PublicKeyFromByteArrayMock: func(b []byte) (key crypto.PublicKey, e error) { - return nil, nil - }, - }, &mock.MarshalizerMock{ UnmarshalHandler: func(obj interface{}, buff []byte) error { (obj.(*heartbeat.Heartbeat)).Pubkey = []byte(pubKey) @@ -290,9 +254,39 @@ func TestMonitor_ProcessReceivedMessageWithNewPublicKey(t *testing.T) { }, time.Second*1000, map[uint32][]string{0: {"pk2"}}, + time.Now(), + &mock.MessageHandlerStub{ + CreateHeartbeatFromP2pMessageCalled: func(message p2p.MessageP2P) (*heartbeat.Heartbeat, error) { + var rcvHb heartbeat.Heartbeat + _ = json.Unmarshal(message.Data(), &rcvHb) + return &rcvHb, nil + }, + }, + &mock.HeartbeatStorerStub{ + UpdateGenesisTimeCalled: func(genesisTime time.Time) error { + return nil + }, + LoadHbmiDTOCalled: func(pubKey string) (*heartbeat.HeartbeatDTO, error) { + return nil, errors.New("not found") + }, + LoadKeysCalled: func() ([][]byte, error) { + return nil, nil + }, + SavePubkeyDataCalled: func(pubkey []byte, heartbeat *heartbeat.HeartbeatDTO) error { + return nil + }, + SaveKeysCalled: func(peersSlice [][]byte) error { + return nil + }, + }, + th, ) - err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: []byte("")}) + hb := heartbeat.Heartbeat{ + Pubkey: []byte(pubKey), + } + hbBytes, _ := json.Marshal(hb) + err := mon.ProcessReceivedMessage(&mock.P2PMessageStub{DataField: hbBytes}) assert.Nil(t, err) //a delay is mandatory for the go routine to finish its job @@ -309,17 +303,8 @@ func TestMonitor_ProcessReceivedMessageWithNewShardID(t *testing.T) { pubKey := []byte("pk1") + th := &mock.MockTimer{} mon, _ := 
heartbeat.NewMonitor( - &mock.SinglesignStub{ - VerifyCalled: func(public crypto.PublicKey, msg []byte, sig []byte) error { - return nil - }, - }, - &mock.KeyGenMock{ - PublicKeyFromByteArrayMock: func(b []byte) (key crypto.PublicKey, e error) { - return nil, nil - }, - }, &mock.MarshalizerMock{ UnmarshalHandler: func(obj interface{}, buff []byte) error { var rcvdHb heartbeat.Heartbeat @@ -331,6 +316,32 @@ func TestMonitor_ProcessReceivedMessageWithNewShardID(t *testing.T) { }, time.Second*1000, map[uint32][]string{0: {"pk1"}}, + time.Now(), + &mock.MessageHandlerStub{ + CreateHeartbeatFromP2pMessageCalled: func(message p2p.MessageP2P) (*heartbeat.Heartbeat, error) { + var rcvHb heartbeat.Heartbeat + _ = json.Unmarshal(message.Data(), &rcvHb) + return &rcvHb, nil + }, + }, + &mock.HeartbeatStorerStub{ + UpdateGenesisTimeCalled: func(genesisTime time.Time) error { + return nil + }, + LoadHbmiDTOCalled: func(pubKey string) (*heartbeat.HeartbeatDTO, error) { + return nil, errors.New("not found") + }, + LoadKeysCalled: func() ([][]byte, error) { + return nil, nil + }, + SavePubkeyDataCalled: func(pubkey []byte, heartbeat *heartbeat.HeartbeatDTO) error { + return nil + }, + SaveKeysCalled: func(peersSlice [][]byte) error { + return nil + }, + }, + th, ) // First send from pk1 from shard 0 @@ -378,18 +389,9 @@ func TestMonitor_ProcessReceivedMessageShouldSetPeerInactive(t *testing.T) { pubKey1 := "pk1-should-stay-online" pubKey2 := "pk2-should-go-offline" - + storer, _ := storage.NewHeartbeatDbStorer(mock.NewStorerMock(), &mock.MarshalizerFake{}) + th := &mock.MockTimer{} mon, _ := heartbeat.NewMonitor( - &mock.SinglesignStub{ - VerifyCalled: func(public crypto.PublicKey, msg []byte, sig []byte) error { - return nil - }, - }, - &mock.KeyGenMock{ - PublicKeyFromByteArrayMock: func(b []byte) (key crypto.PublicKey, e error) { - return nil, nil - }, - }, &mock.MarshalizerMock{ UnmarshalHandler: func(obj interface{}, buff []byte) error { var rcvdHb heartbeat.Heartbeat @@ -399,8 +401,18 @@ func TestMonitor_ProcessReceivedMessageShouldSetPeerInactive(t *testing.T) { return nil }, }, - time.Millisecond*5, + time.Second*5, map[uint32][]string{0: {pubKey1, pubKey2}}, + th.Now(), + &mock.MessageHandlerStub{ + CreateHeartbeatFromP2pMessageCalled: func(message p2p.MessageP2P) (*heartbeat.Heartbeat, error) { + var rcvHb heartbeat.Heartbeat + _ = json.Unmarshal(message.Data(), &rcvHb) + return &rcvHb, nil + }, + }, + storer, + th, ) // First send from pk1 @@ -412,17 +424,20 @@ func TestMonitor_ProcessReceivedMessageShouldSetPeerInactive(t *testing.T) { assert.Nil(t, err) // set pk2 to inactive as max inactive time is lower - time.Sleep(6 * time.Millisecond) + time.Sleep(10 * time.Millisecond) + th.IncrementSeconds(6) // Check that both are added hbStatus := mon.GetHeartbeats() assert.Equal(t, 2, len(hbStatus)) + //assert.False(t, hbStatus[1].IsActive) // Now send a message from pk1 in order to see that pk2 is not active anymore err = sendHbMessageFromPubKey(pubKey1, mon) + time.Sleep(5 * time.Millisecond) assert.Nil(t, err) - time.Sleep(5 * time.Millisecond) + th.IncrementSeconds(4) hbStatus = mon.GetHeartbeats() diff --git a/node/heartbeat/realTimer.go b/node/heartbeat/realTimer.go new file mode 100644 index 00000000000..1fac46c253f --- /dev/null +++ b/node/heartbeat/realTimer.go @@ -0,0 +1,20 @@ +package heartbeat + +import "time" + +// RealTimer is an implementation of Timer and uses real time.now +type RealTimer struct { +} + +// Now returns the time.Now() Time +func (m *RealTimer) Now() time.Time { + 
return time.Now() +} + +// IsInterfaceNil verifies if the interface is nil +func (m *RealTimer) IsInterfaceNil() bool { + if m == nil { + return true + } + return false +} diff --git a/node/heartbeat/storage/heartbeatStorer.go b/node/heartbeat/storage/heartbeatStorer.go new file mode 100644 index 00000000000..bfe9aace5da --- /dev/null +++ b/node/heartbeat/storage/heartbeatStorer.go @@ -0,0 +1,160 @@ +package storage + +import ( + "fmt" + "time" + + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/node/heartbeat" + "github.com/ElrondNetwork/elrond-go/storage" +) + +var log = logger.DefaultLogger() + +const peersKeysDbEntry = "keys" +const genesisTimeDbEntry = "genesisTime" + +// HeartbeatDbStorer is the struct which will handle storage operations for heartbeat +type HeartbeatDbStorer struct { + storer storage.Storer + marshalizer marshal.Marshalizer +} + +// NewHeartbeatDbStorer will create an instance of HeartbeatDbStorer +func NewHeartbeatDbStorer( + storer storage.Storer, + marshalizer marshal.Marshalizer, +) (*HeartbeatDbStorer, error) { + if storer == nil || storer.IsInterfaceNil() { + return nil, heartbeat.ErrNilMonitorDb + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, heartbeat.ErrNilMarshalizer + } + + return &HeartbeatDbStorer{ + storer: storer, + marshalizer: marshalizer, + }, nil +} + +// LoadGenesisTime will return the genesis time saved in the storer +func (hs *HeartbeatDbStorer) LoadGenesisTime() (time.Time, error) { + genesisTimeFromDbBytes, err := hs.storer.Get([]byte(genesisTimeDbEntry)) + if err != nil { + return time.Time{}, heartbeat.ErrFetchGenesisTimeFromDb + } + + var genesisTimeFromDb time.Time + err = hs.marshalizer.Unmarshal(&genesisTimeFromDb, genesisTimeFromDbBytes) + if err != nil { + return time.Time{}, heartbeat.ErrUnmarshalGenesisTime + } + + return genesisTimeFromDb, nil +} + +// UpdateGenesisTime will update the saved genesis time and will log if the genesis time changed +func (hs *HeartbeatDbStorer) UpdateGenesisTime(genesisTime time.Time) error { + + genesisTimeFromDb, err := hs.LoadGenesisTime() + if err != nil && err != heartbeat.ErrFetchGenesisTimeFromDb { + return err + } + + err = hs.saveGenesisTimeToDb(genesisTime) + if err != nil { + return err + } + + if genesisTimeFromDb != genesisTime { + log.Info(fmt.Sprintf("updated heartbeat's genesis time to %s", genesisTimeFromDb)) + } + + return nil +} + +func (hs *HeartbeatDbStorer) saveGenesisTimeToDb(genesisTime time.Time) error { + genesisTimeBytes, err := hs.marshalizer.Marshal(genesisTime) + if err != nil { + return heartbeat.ErrMarshalGenesisTime + } + + err = hs.storer.Put([]byte(genesisTimeDbEntry), genesisTimeBytes) + if err != nil { + return heartbeat.ErrStoreGenesisTimeToDb + } + + return nil +} + +// LoadHbmiDTO will return the HeartbeatDTO for the given public key from storage +func (hs *HeartbeatDbStorer) LoadHbmiDTO(pubKey string) (*heartbeat.HeartbeatDTO, error) { + pkbytes := []byte(pubKey) + + hbFromDB, err := hs.storer.Get(pkbytes) + if err != nil { + return nil, err + } + + heartbeatDto := heartbeat.HeartbeatDTO{} + err = hs.marshalizer.Unmarshal(&heartbeatDto, hbFromDB) + if err != nil { + return nil, err + } + + return &heartbeatDto, nil +} + +// LoadKeys will return the keys saved in the storer, representing public keys of all peers the node is connected to +func (hs *HeartbeatDbStorer) LoadKeys() ([][]byte, error) { + allKeysBytes, err := 
hs.storer.Get([]byte(peersKeysDbEntry)) + if err != nil { + return nil, err + } + + var peersSlice [][]byte + err = hs.marshalizer.Unmarshal(&peersSlice, allKeysBytes) + if err != nil { + return nil, err + } + + return peersSlice, nil +} + +// SaveKeys will update the keys for all connected peers +func (hs *HeartbeatDbStorer) SaveKeys(peersSlice [][]byte) error { + marshalizedFullPeersSlice, errMarsh := hs.marshalizer.Marshal(peersSlice) + if errMarsh != nil { + return errMarsh + } + + return hs.storer.Put([]byte(peersKeysDbEntry), marshalizedFullPeersSlice) +} + +// SavePubkeyData will add or update a HeartbeatDTO in the storer +func (hs *HeartbeatDbStorer) SavePubkeyData( + pubkey []byte, + heartbeat *heartbeat.HeartbeatDTO, +) error { + marshalizedHeartBeat, err := hs.marshalizer.Marshal(heartbeat) + if err != nil { + return err + } + + errStore := hs.storer.Put(pubkey, marshalizedHeartBeat) + if errStore != nil { + return errStore + } + + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (hs *HeartbeatDbStorer) IsInterfaceNil() bool { + if hs == nil { + return true + } + return false +} diff --git a/node/mock/blockProcessorStub.go b/node/mock/blockProcessorStub.go index 928115b5241..f8033ae7f27 100644 --- a/node/mock/blockProcessorStub.go +++ b/node/mock/blockProcessorStub.go @@ -72,6 +72,10 @@ func (blProcMock BlockProcessorStub) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } +func (blProcMock BlockProcessorStub) SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) { + panic("implement me") +} + // IsInterfaceNil returns true if there is no value under the interface func (blProcMock *BlockProcessorStub) IsInterfaceNil() bool { if blProcMock == nil { diff --git a/node/mock/heartbeatStorerStub.go b/node/mock/heartbeatStorerStub.go new file mode 100644 index 00000000000..b68fba294ba --- /dev/null +++ b/node/mock/heartbeatStorerStub.go @@ -0,0 +1,44 @@ +package mock + +import ( + "time" + + "github.com/ElrondNetwork/elrond-go/node/heartbeat" +) + +type HeartbeatStorerStub struct { + LoadGenesisTimeCalled func() (time.Time, error) + UpdateGenesisTimeCalled func(genesisTime time.Time) error + LoadHbmiDTOCalled func(pubKey string) (*heartbeat.HeartbeatDTO, error) + SavePubkeyDataCalled func(pubkey []byte, heartbeat *heartbeat.HeartbeatDTO) error + LoadKeysCalled func() ([][]byte, error) + SaveKeysCalled func(peersSlice [][]byte) error +} + +func (hss *HeartbeatStorerStub) LoadGenesisTime() (time.Time, error) { + return hss.LoadGenesisTimeCalled() +} + +func (hss *HeartbeatStorerStub) UpdateGenesisTime(genesisTime time.Time) error { + return hss.UpdateGenesisTimeCalled(genesisTime) +} + +func (hss *HeartbeatStorerStub) LoadHbmiDTO(pubKey string) (*heartbeat.HeartbeatDTO, error) { + return hss.LoadHbmiDTOCalled(pubKey) +} + +func (hss *HeartbeatStorerStub) SavePubkeyData(pubkey []byte, heartbeat *heartbeat.HeartbeatDTO) error { + return hss.SavePubkeyDataCalled(pubkey, heartbeat) +} + +func (hss *HeartbeatStorerStub) LoadKeys() ([][]byte, error) { + return hss.LoadKeysCalled() +} + +func (hss *HeartbeatStorerStub) SaveKeys(peersSlice [][]byte) error { + return hss.SaveKeysCalled(peersSlice) +} + +func (hss *HeartbeatStorerStub) IsInterfaceNil() bool { + return false +} diff --git a/node/mock/indexerMock.go b/node/mock/indexerMock.go new file mode 100644 index 00000000000..10de9fbdaa3 --- /dev/null +++ b/node/mock/indexerMock.go @@ -0,0 +1,45 @@ +package mock + 
+import ( + "github.com/ElrondNetwork/elrond-go/core/indexer" + "github.com/ElrondNetwork/elrond-go/core/statistics" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" +) + +// IndexerMock is a mock implementation fot the Indexer interface +type IndexerMock struct { + SaveBlockCalled func(body block.Body, header *block.Header) +} + +func (im *IndexerMock) SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler, signersIndexes []uint64) { + panic("implement me") +} + +func (im *IndexerMock) SaveMetaBlock(header data.HeaderHandler, signersIndexes []uint64) { + panic("implement me") +} + +func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { + panic("implement me") +} + +func (im *IndexerMock) SaveRoundInfo(roundInfo indexer.RoundInfo) { + panic("implement me") +} + +func (im *IndexerMock) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) { + panic("implement me") +} + +// IsInterfaceNil returns true if there is no value under the interface +func (im *IndexerMock) IsInterfaceNil() bool { + if im == nil { + return true + } + return false +} + +func (im *IndexerMock) IsNilIndexer() bool { + return false +} diff --git a/node/mock/messageHandlerStub.go b/node/mock/messageHandlerStub.go new file mode 100644 index 00000000000..815f6265ab1 --- /dev/null +++ b/node/mock/messageHandlerStub.go @@ -0,0 +1,18 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/node/heartbeat" + "github.com/ElrondNetwork/elrond-go/p2p" +) + +type MessageHandlerStub struct { + CreateHeartbeatFromP2pMessageCalled func(message p2p.MessageP2P) (*heartbeat.Heartbeat, error) +} + +func (mhs *MessageHandlerStub) IsInterfaceNil() bool { + return false +} + +func (mhs *MessageHandlerStub) CreateHeartbeatFromP2pMessage(message p2p.MessageP2P) (*heartbeat.Heartbeat, error) { + return mhs.CreateHeartbeatFromP2pMessageCalled(message) +} diff --git a/node/mock/mockTimer.go b/node/mock/mockTimer.go new file mode 100644 index 00000000000..b86029048a8 --- /dev/null +++ b/node/mock/mockTimer.go @@ -0,0 +1,26 @@ +package mock + +import "time" + +type MockTimer struct { + seconds int64 +} + +func (m *MockTimer) Now() time.Time { + return time.Unix(m.seconds, 0) +} + +func (m *MockTimer) IsInterfaceNil() bool { + if m == nil { + return true + } + return false +} + +func (m *MockTimer) IncrementSeconds(value int) { + m.seconds += int64(value) +} + +func (m *MockTimer) SetSeconds(value int) { + m.seconds = int64(value) +} diff --git a/node/mock/nodesCoordinatorMock.go b/node/mock/nodesCoordinatorMock.go new file mode 100644 index 00000000000..3388ae1da14 --- /dev/null +++ b/node/mock/nodesCoordinatorMock.go @@ -0,0 +1,119 @@ +package mock + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type NodesCoordinatorMock struct { + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) ([]sharding.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) +} + +func (ncm *NodesCoordinatorMock) GetAllValidatorsPublicKeys() map[uint32][][]byte { + return nil +} + +func (ncm *NodesCoordinatorMock) ComputeValidatorsGroup( + randomness []byte, + round uint64, + shardId uint32, +) (validatorsGroup []sharding.Validator, err error) { + + if ncm.ComputeValidatorsGroupCalled != nil { + return 
ncm.ComputeValidatorsGroupCalled(randomness, round, shardId) + } + + list := []sharding.Validator{ + NewValidatorMock(big.NewInt(0), 0, []byte("A"), []byte("AA")), + NewValidatorMock(big.NewInt(0), 0, []byte("B"), []byte("BB")), + NewValidatorMock(big.NewInt(0), 0, []byte("C"), []byte("CC")), + NewValidatorMock(big.NewInt(0), 0, []byte("D"), []byte("DD")), + NewValidatorMock(big.NewInt(0), 0, []byte("E"), []byte("EE")), + NewValidatorMock(big.NewInt(0), 0, []byte("F"), []byte("FF")), + NewValidatorMock(big.NewInt(0), 0, []byte("G"), []byte("GG")), + NewValidatorMock(big.NewInt(0), 0, []byte("H"), []byte("HH")), + NewValidatorMock(big.NewInt(0), 0, []byte("I"), []byte("II")), + } + + return list, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + pubKeys := make([]string, 0) + + for _, v := range validators { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + addresses := make([]string, 0) + for _, v := range validators { + addresses = append(addresses, string(v.Address())) + } + + return addresses, nil +} + +func (ncm *NodesCoordinatorMock) ConsensusGroupSize(shardId uint32) int { + panic("implement me") +} + +func (ncm *NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { + return nil +} + +func (ncm *NodesCoordinatorMock) SetConsensusGroupSize(int) error { + panic("implement me") +} + +func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { + panic("implement me") +} + +func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { + panic("implement me") +} + +func (ncm *NodesCoordinatorMock) GetValidatorsIndexes(publicKeys []string) []uint64 { + panic("implement me") +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ncm *NodesCoordinatorMock) IsInterfaceNil() bool { + if ncm == nil { + return true + } + return false +} diff --git a/node/mock/poolsHolderStub.go b/node/mock/poolsHolderStub.go index 632f7e75ab5..1ea9c0a934e 100644 --- a/node/mock/poolsHolderStub.go +++ b/node/mock/poolsHolderStub.go @@ -11,6 +11,7 @@ type PoolsHolderStub struct { PeerChangesBlocksCalled func() storage.Cacher TransactionsCalled func() dataRetriever.ShardedDataCacherNotifier UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier + RewardTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier MiniBlocksCalled func() storage.Cacher MetaBlocksCalled func() storage.Cacher MetaHeadersNoncesCalled func() dataRetriever.Uint64SyncMapCacher @@ -48,6 +49,10 @@ func (phs *PoolsHolderStub) UnsignedTransactions() dataRetriever.ShardedDataCach return phs.UnsignedTransactionsCalled() } +func (phs *PoolsHolderStub) RewardTransactions() 
dataRetriever.ShardedDataCacherNotifier { + return phs.RewardTransactionsCalled() +} + // IsInterfaceNil returns true if there is no value under the interface func (phs *PoolsHolderStub) IsInterfaceNil() bool { if phs == nil { diff --git a/node/mock/storerMock.go b/node/mock/storerMock.go new file mode 100644 index 00000000000..fb22eadfd00 --- /dev/null +++ b/node/mock/storerMock.go @@ -0,0 +1,62 @@ +package mock + +import ( + "encoding/base64" + "errors" + "fmt" + "sync" +) + +type StorerMock struct { + mut sync.Mutex + data map[string][]byte +} + +func NewStorerMock() *StorerMock { + return &StorerMock{ + data: make(map[string][]byte), + } +} + +func (sm *StorerMock) Put(key, data []byte) error { + sm.mut.Lock() + defer sm.mut.Unlock() + sm.data[string(key)] = data + + return nil +} + +func (sm *StorerMock) Get(key []byte) ([]byte, error) { + sm.mut.Lock() + defer sm.mut.Unlock() + + val, ok := sm.data[string(key)] + if !ok { + return nil, errors.New(fmt.Sprintf("key: %s not found", base64.StdEncoding.EncodeToString(key))) + } + + return val, nil +} + +func (sm *StorerMock) Has(key []byte) error { + return errors.New("not implemented") +} + +func (sm *StorerMock) Remove(key []byte) error { + return errors.New("not implemented") +} + +func (sm *StorerMock) ClearCache() { +} + +func (sm *StorerMock) DestroyUnit() error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sm *StorerMock) IsInterfaceNil() bool { + if sm == nil { + return true + } + return false +} diff --git a/node/mock/storerStub.go b/node/mock/storerStub.go index e91427e4e76..af7d1b3ee16 100644 --- a/node/mock/storerStub.go +++ b/node/mock/storerStub.go @@ -3,8 +3,7 @@ package mock type StorerStub struct { PutCalled func(key, data []byte) error GetCalled func(key []byte) ([]byte, error) - HasCalled func(key []byte) (bool, error) - HasOrAddCalled func(key []byte, value []byte) (bool, error) + HasCalled func(key []byte) error RemoveCalled func(key []byte) error ClearCacheCalled func() DestroyUnitCalled func() error @@ -18,14 +17,10 @@ func (ss *StorerStub) Get(key []byte) ([]byte, error) { return ss.GetCalled(key) } -func (ss *StorerStub) Has(key []byte) (bool, error) { +func (ss *StorerStub) Has(key []byte) error { return ss.HasCalled(key) } -func (ss *StorerStub) HasOrAdd(key []byte, value []byte) (bool, error) { - return ss.HasOrAddCalled(key, value) -} - func (ss *StorerStub) Remove(key []byte) error { return ss.RemoveCalled(key) } @@ -37,3 +32,11 @@ func (ss *StorerStub) ClearCache() { func (ss *StorerStub) DestroyUnit() error { return ss.DestroyUnitCalled() } + +// IsInterfaceNil returns true if there is no value under the interface +func (ss *StorerStub) IsInterfaceNil() bool { + if ss == nil { + return true + } + return false +} diff --git a/node/mock/validatorMock.go b/node/mock/validatorMock.go new file mode 100644 index 00000000000..e4f9bf01af8 --- /dev/null +++ b/node/mock/validatorMock.go @@ -0,0 +1,32 @@ +package mock + +import ( + "math/big" +) + +type ValidatorMock struct { + stake *big.Int + rating int32 + pubKey []byte + address []byte +} + +func NewValidatorMock(stake *big.Int, rating int32, pubKey []byte, address []byte) *ValidatorMock { + return &ValidatorMock{stake: stake, rating: rating, pubKey: pubKey, address: address} +} + +func (vm *ValidatorMock) Stake() *big.Int { + return vm.stake +} + +func (vm *ValidatorMock) Rating() int32 { + return vm.rating +} + +func (vm *ValidatorMock) PubKey() []byte { + return vm.pubKey +} + +func (vm *ValidatorMock) 
Address() []byte { + return vm.address +} diff --git a/node/node.go b/node/node.go index 5b55d2821a0..4e10c5bc929 100644 --- a/node/node.go +++ b/node/node.go @@ -15,9 +15,8 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/chronology" "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" - "github.com/ElrondNetwork/elrond-go/consensus/validators" - "github.com/ElrondNetwork/elrond-go/consensus/validators/groupSelectors" "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/core/logger" "github.com/ElrondNetwork/elrond-go/core/partitioning" "github.com/ElrondNetwork/elrond-go/crypto" @@ -29,6 +28,7 @@ import ( "github.com/ElrondNetwork/elrond-go/hashing" "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/node/heartbeat" + "github.com/ElrondNetwork/elrond-go/node/heartbeat/storage" "github.com/ElrondNetwork/elrond-go/ntp" "github.com/ElrondNetwork/elrond-go/p2p" "github.com/ElrondNetwork/elrond-go/process" @@ -64,7 +64,6 @@ type Node struct { syncTimer ntp.SyncTimer rounder consensus.Rounder blockProcessor process.BlockProcessor - blockTracker process.BlocksTracker genesisTime time.Time accounts state.AccountsAdapter addrConverter state.AddressConverter @@ -90,6 +89,7 @@ type Node struct { metaDataPool dataRetriever.MetaPoolsHolder store dataRetriever.StorageService shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator consensusTopic string consensusType string @@ -98,6 +98,8 @@ type Node struct { txStorageSize uint32 currentSendingGoRoutines int32 bootstrapRoundIndex uint64 + + indexer indexer.Indexer } // ApplyOptions can set up different configurable options of a Node instance @@ -257,7 +259,6 @@ func (n *Node) StartConsensus() error { worker, err := spos.NewWorker( consensusService, n.blockProcessor, - n.blockTracker, bootstrapper, broadcastMessenger, consensusState, @@ -278,15 +279,9 @@ func (n *Node) StartConsensus() error { return err } - validatorGroupSelector, err := n.createValidatorGroupSelector() - if err != nil { - return err - } - consensusDataContainer, err := spos.NewConsensusCore( n.blkc, n.blockProcessor, - n.blockTracker, bootstrapper, broadcastMessenger, chronologyHandler, @@ -297,13 +292,14 @@ func (n *Node) StartConsensus() error { n.multiSigner, n.rounder, n.shardCoordinator, + n.nodesCoordinator, n.syncTimer, - validatorGroupSelector) + ) if err != nil { return err } - fct, err := sposFactory.GetSubroundsFactory(consensusDataContainer, consensusState, worker, n.consensusType, n.appStatusHandler) + fct, err := sposFactory.GetSubroundsFactory(consensusDataContainer, consensusState, worker, n.consensusType, n.appStatusHandler, n.indexer) if err != nil { return err } @@ -452,37 +448,6 @@ func (n *Node) createConsensusState() (*spos.ConsensusState, error) { return consensusState, nil } -// createValidatorGroupSelector creates a index hashed group selector object -func (n *Node) createValidatorGroupSelector() (consensus.ValidatorGroupSelector, error) { - validatorGroupSelector, err := groupSelectors.NewIndexHashedGroupSelector(n.consensusGroupSize, n.hasher) - if err != nil { - return nil, err - } - - validatorsList := make([]consensus.Validator, 0) - shID := n.shardCoordinator.SelfId() - - if len(n.initialNodesPubkeys[shID]) == 0 { - return nil, errors.New("could not create validator group as shardID is out of range") - } - - for i := 0; i < 
len(n.initialNodesPubkeys[shID]); i++ { - validator, err := validators.NewValidator(big.NewInt(0), 0, []byte(n.initialNodesPubkeys[shID][i])) - if err != nil { - return nil, err - } - - validatorsList = append(validatorsList, validator) - } - - err = validatorGroupSelector.LoadEligibleList(validatorsList) - if err != nil { - return nil, err - } - - return validatorGroupSelector, nil -} - // createConsensusTopic creates a consensus topic for node func (n *Node) createConsensusTopic(messageProcessor p2p.MessageProcessor, shardCoordinator sharding.Coordinator) error { if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { @@ -737,12 +702,12 @@ func (n *Node) GetAccount(address string) (*state.Account, error) { } // StartHeartbeat starts the node's heartbeat processing/signaling module -func (n *Node) StartHeartbeat(config config.HeartbeatConfig, versionNumber string, nodeDisplayName string) error { - if !config.Enabled { +func (n *Node) StartHeartbeat(hbConfig config.HeartbeatConfig, versionNumber string, nodeDisplayName string) error { + if !hbConfig.Enabled { return nil } - err := n.checkConfigParams(config) + err := n.checkConfigParams(hbConfig) if err != nil { return err } @@ -772,12 +737,25 @@ func (n *Node) StartHeartbeat(config config.HeartbeatConfig, versionNumber strin return err } - n.heartbeatMonitor, err = heartbeat.NewMonitor( + heartbeatStorageUnit := n.store.GetStorer(dataRetriever.HeartbeatUnit) + heartBeatMsgProcessor, err := heartbeat.NewMessageProcessor( n.singleSigner, n.keyGen, + n.marshalizer) + if err != nil { + return err + } + + heartbeatStorer, err := storage.NewHeartbeatDbStorer(heartbeatStorageUnit, n.marshalizer) + timer := &heartbeat.RealTimer{} + n.heartbeatMonitor, err = heartbeat.NewMonitor( n.marshalizer, - time.Second*time.Duration(config.DurationInSecToConsiderUnresponsive), + time.Second*time.Duration(hbConfig.DurationInSecToConsiderUnresponsive), n.initialNodesPubkeys, + n.genesisTime, + heartBeatMsgProcessor, + heartbeatStorer, + timer, ) if err != nil { return err @@ -793,7 +771,7 @@ func (n *Node) StartHeartbeat(config config.HeartbeatConfig, versionNumber strin return err } - go n.startSendingHeartbeats(config) + go n.startSendingHeartbeats(hbConfig) return nil } diff --git a/node/nodeTesting.go b/node/nodeTesting.go index 13528c1804f..1ea56ef41bd 100644 --- a/node/nodeTesting.go +++ b/node/nodeTesting.go @@ -228,11 +228,13 @@ func (n *Node) generateAndSignSingleTx( } tx := transaction.Transaction{ - Nonce: nonce, - Value: value, - RcvAddr: rcvAddrBytes, - SndAddr: sndAddrBytes, - Data: data, + Nonce: nonce, + Value: value, + GasLimit: 100, + GasPrice: 10, + RcvAddr: rcvAddrBytes, + SndAddr: sndAddrBytes, + Data: data, } marshalizedTx, err := n.marshalizer.Marshal(&tx) diff --git a/node/node_test.go b/node/node_test.go index bcc69c3795c..4a851404342 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -1015,6 +1015,11 @@ func TestNode_StartHeartbeatNilMarshalizerShouldErr(t *testing.T) { node.WithInitialNodesPubKeys(map[uint32][]string{0: {"pk1"}}), node.WithPrivKey(&mock.PrivateKeyStub{}), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataStore(&mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return mock.NewStorerMock() + }, + }), ) err := n.StartHeartbeat(config.HeartbeatConfig{ MinTimeToWaitBetweenBroadcastsInSec: 1, @@ -1052,6 +1057,11 @@ func TestNode_StartHeartbeatNilKeygenShouldErr(t *testing.T) { node.WithInitialNodesPubKeys(map[uint32][]string{0: 
{"pk1"}}), node.WithPrivKey(&mock.PrivateKeyStub{}), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataStore(&mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return mock.NewStorerMock() + }, + }), ) err := n.StartHeartbeat(config.HeartbeatConfig{ MinTimeToWaitBetweenBroadcastsInSec: 1, @@ -1081,6 +1091,11 @@ func TestNode_StartHeartbeatHasTopicValidatorShouldErr(t *testing.T) { node.WithInitialNodesPubKeys(map[uint32][]string{0: {"pk1"}}), node.WithTxSignPrivKey(&mock.PrivateKeyStub{}), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataStore(&mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return mock.NewStorerMock() + }, + }), ) err := n.StartHeartbeat(config.HeartbeatConfig{ MinTimeToWaitBetweenBroadcastsInSec: 1, @@ -1116,6 +1131,11 @@ func TestNode_StartHeartbeatCreateTopicFailsShouldErr(t *testing.T) { node.WithInitialNodesPubKeys(map[uint32][]string{0: {"pk1"}}), node.WithTxSignPrivKey(&mock.PrivateKeyStub{}), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataStore(&mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return mock.NewStorerMock() + }, + }), ) err := n.StartHeartbeat(config.HeartbeatConfig{ MinTimeToWaitBetweenBroadcastsInSec: 1, @@ -1154,6 +1174,11 @@ func TestNode_StartHeartbeatRegisterMessageProcessorFailsShouldErr(t *testing.T) node.WithInitialNodesPubKeys(map[uint32][]string{0: {"pk1"}}), node.WithPrivKey(&mock.PrivateKeyStub{}), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataStore(&mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return mock.NewStorerMock() + }, + }), ) err := n.StartHeartbeat(config.HeartbeatConfig{ MinTimeToWaitBetweenBroadcastsInSec: 1, @@ -1211,6 +1236,11 @@ func TestNode_StartHeartbeatShouldWorkAndCallSendHeartbeat(t *testing.T) { }, }), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataStore(&mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return mock.NewStorerMock() + }, + }), ) err := n.StartHeartbeat(config.HeartbeatConfig{ MinTimeToWaitBetweenBroadcastsInSec: 1, @@ -1264,6 +1294,11 @@ func TestNode_StartHeartbeatShouldWorkAndHaveAllPublicKeys(t *testing.T) { }, }), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataStore(&mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return mock.NewStorerMock() + }, + }), ) err := n.StartHeartbeat(config.HeartbeatConfig{ @@ -1318,6 +1353,11 @@ func TestNode_StartHeartbeatShouldSetNodesFromInitialPubKeysAsValidators(t *test }, }), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataStore(&mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return mock.NewStorerMock() + }, + }), ) err := n.StartHeartbeat(config.HeartbeatConfig{ @@ -1377,6 +1417,11 @@ func TestNode_StartHeartbeatShouldWorkAndCanCallProcessMessage(t *testing.T) { }, }), node.WithShardCoordinator(mock.NewOneShardCoordinatorMock()), + node.WithDataStore(&mock.ChainStorerMock{ + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return mock.NewStorerMock() + }, + }), ) err := n.StartHeartbeat(config.HeartbeatConfig{ diff --git a/ntp/syncTime.go b/ntp/syncTime.go index 4241a78ec31..98aa38a767d 100644 
--- a/ntp/syncTime.go +++ b/ntp/syncTime.go @@ -150,6 +150,8 @@ func (s *syncTime) formatTime(time time.Time) string { // CurrentTime method gets the current time on which is added the current offset func (s *syncTime) CurrentTime() time.Time { + s.mut.RLock() + defer s.mut.RUnlock() return time.Now().Add(s.clockOffset) } diff --git a/p2p/libp2p/issues_test.go b/p2p/libp2p/issues_test.go index 4ab076a1638..861c7e79955 100644 --- a/p2p/libp2p/issues_test.go +++ b/p2p/libp2p/issues_test.go @@ -48,15 +48,19 @@ func createMessenger(port int) p2p.Messenger { // Next message that the sender tries to send will cause a new error to be logged and no data to be sent // The fix consists in the full stream closing when an error occurs during writing. func TestIssueEN898_StreamResetError(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + mes1 := createMessenger(23100) mes2 := createMessenger(23101) defer func() { - mes1.Close() - mes2.Close() + _ = mes1.Close() + _ = mes2.Close() }() - mes1.ConnectToPeer(getConnectableAddress(mes2)) + _ = mes1.ConnectToPeer(getConnectableAddress(mes2)) topic := "test topic" @@ -74,8 +78,8 @@ func TestIssueEN898_StreamResetError(t *testing.T) { smallPacketReceived := &atomic.Value{} smallPacketReceived.Store(false) - mes2.CreateTopic(topic, false) - mes2.RegisterMessageProcessor(topic, &mock.MessageProcessorStub{ + _ = mes2.CreateTopic(topic, false) + _ = mes2.RegisterMessageProcessor(topic, &mock.MessageProcessorStub{ ProcessMessageCalled: func(message p2p.MessageP2P) error { if bytes.Equal(message.Data(), largePacket) { largePacketReceived.Store(true) @@ -90,12 +94,12 @@ func TestIssueEN898_StreamResetError(t *testing.T) { }) fmt.Println("sending the large packet...") - mes1.SendToConnectedPeer(topic, largePacket, mes2.ID()) + _ = mes1.SendToConnectedPeer(topic, largePacket, mes2.ID()) time.Sleep(time.Second) fmt.Println("sending the small packet...") - mes1.SendToConnectedPeer(topic, smallPacket, mes2.ID()) + _ = mes1.SendToConnectedPeer(topic, smallPacket, mes2.ID()) time.Sleep(time.Second) diff --git a/p2p/libp2p/netMessenger.go b/p2p/libp2p/netMessenger.go index 36db5498907..1b570adce01 100644 --- a/p2p/libp2p/netMessenger.go +++ b/p2p/libp2p/netMessenger.go @@ -31,6 +31,8 @@ const DirectSendID = protocol.ID("/directsend/1.0.0") const refreshPeersOnTopic = time.Second * 60 const ttlPeersOnTopic = time.Second * 120 +const pubsubTimeCacheDuration = 10 * time.Minute + //TODO remove the header size of the message when commit d3c5ecd3a3e884206129d9f2a9a4ddfd5e7c8951 from // https://github.com/libp2p/go-libp2p-pubsub/pull/189/commits will be part of a new release var messageHeader = 64 * 1024 //64kB @@ -178,6 +180,8 @@ func createPubSub(ctxProvider *Libp2pContext, withSigning bool) (*pubsub.PubSub, pubsub.WithMessageSigning(withSigning), } + pubsub.TimeCacheDuration = pubsubTimeCacheDuration + ps, err := pubsub.NewGossipSub(ctxProvider.Context(), ctxProvider.Host(), optsPS...) 
if err != nil { return nil, err diff --git a/p2p/libp2p/netMessenger_test.go b/p2p/libp2p/netMessenger_test.go index 73c57d9caf6..1edf848cea6 100644 --- a/p2p/libp2p/netMessenger_test.go +++ b/p2p/libp2p/netMessenger_test.go @@ -261,6 +261,11 @@ func TestNewNetworkMessenger_NoConnMgrShouldWork(t *testing.T) { } func TestNewNetworkMessenger_WithConnMgrShouldWork(t *testing.T) { + //TODO remove skip when external library is concurrent safe + if testing.Short() { + t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") + } + port := 4000 _, sk := createLibP2PCredentialsMessenger() @@ -294,6 +299,11 @@ func TestNewNetworkMessenger_WithConnMgrShouldWork(t *testing.T) { } func TestNewNetworkMessenger_WithNullPeerDiscoveryShouldWork(t *testing.T) { + //TODO remove skip when external library is concurrent safe + if testing.Short() { + t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") + } + port := 4000 _, sk := createLibP2PCredentialsMessenger() @@ -344,6 +354,11 @@ func TestNewNetworkMessenger_NilPeerDiscoveryShouldErr(t *testing.T) { } func TestNewNetworkMessenger_PeerDiscovererFailsWhenApplyingContextShouldErr(t *testing.T) { + //TODO remove skip when external library is concurrent safe + if testing.Short() { + t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") + } + port := 4000 _, sk := createLibP2PCredentialsMessenger() @@ -374,6 +389,11 @@ func TestNewNetworkMessenger_PeerDiscovererFailsWhenApplyingContextShouldErr(t * } func TestNewNetworkMessengerWithPortSweep_ShouldFindFreePort(t *testing.T) { + //TODO remove skip when external library is concurrent safe + if testing.Short() { + t.Skip("this test fails with race detector on because of the github.com/koron/go-ssdp lib") + } + _, sk := createLibP2PCredentialsMessenger() mes, err := libp2p.NewNetworkMessengerOnFreePort( diff --git a/process/block/argProcessor.go b/process/block/argProcessor.go index 1271618735f..ab26abd7b5d 100644 --- a/process/block/argProcessor.go +++ b/process/block/argProcessor.go @@ -15,23 +15,32 @@ import ( // ArgBaseProcessor holds all dependencies required by the process data factory in order to create // new instances type ArgBaseProcessor struct { - Accounts state.AccountsAdapter - ForkDetector process.ForkDetector - Hasher hashing.Hasher - Marshalizer marshal.Marshalizer - Store dataRetriever.StorageService - ShardCoordinator sharding.Coordinator - Uint64Converter typeConverters.Uint64ByteSliceConverter - StartHeaders map[uint32]data.HeaderHandler - RequestHandler process.RequestHandler - Core serviceContainer.Core + Accounts state.AccountsAdapter + ForkDetector process.ForkDetector + Hasher hashing.Hasher + Marshalizer marshal.Marshalizer + Store dataRetriever.StorageService + ShardCoordinator sharding.Coordinator + NodesCoordinator sharding.NodesCoordinator + SpecialAddressHandler process.SpecialAddressHandler + Uint64Converter typeConverters.Uint64ByteSliceConverter + StartHeaders map[uint32]data.HeaderHandler + RequestHandler process.RequestHandler + Core serviceContainer.Core } // ArgShardProcessor holds all dependencies required by the process data factory in order to create // new instances of shard processor type ArgShardProcessor struct { - *ArgBaseProcessor - DataPool dataRetriever.PoolsHolder - BlocksTracker process.BlocksTracker - TxCoordinator process.TransactionCoordinator + ArgBaseProcessor + DataPool dataRetriever.PoolsHolder + TxCoordinator process.TransactionCoordinator + 
TxsPoolsCleaner process.PoolsCleaner +} + +// ArgMetaProcessor holds all dependencies required by the process data factory in order to create +// new instances of meta processor +type ArgMetaProcessor struct { + ArgBaseProcessor + DataPool dataRetriever.MetaPoolsHolder } diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 5a804c0b503..35332f2f535 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -27,17 +27,39 @@ type hashAndHdr struct { hash []byte } +type nonceAndHashInfo struct { + hash []byte + nonce uint64 +} + +type hdrInfo struct { + usedInBlock bool + hdr data.HeaderHandler +} + +type hdrForBlock struct { + missingHdrs uint32 + missingFinalityAttestingHdrs uint32 + highestHdrNonce map[uint32]uint64 + mutHdrsForBlock sync.RWMutex + hdrHashAndInfo map[string]*hdrInfo +} + type mapShardHeaders map[uint32][]data.HeaderHandler type baseProcessor struct { - shardCoordinator sharding.Coordinator - accounts state.AccountsAdapter - forkDetector process.ForkDetector - hasher hashing.Hasher - marshalizer marshal.Marshalizer - store dataRetriever.StorageService - uint64Converter typeConverters.Uint64ByteSliceConverter - blockSizeThrottler process.BlockSizeThrottler + shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator + specialAddressHandler process.SpecialAddressHandler + accounts state.AccountsAdapter + forkDetector process.ForkDetector + hasher hashing.Hasher + marshalizer marshal.Marshalizer + store dataRetriever.StorageService + uint64Converter typeConverters.Uint64ByteSliceConverter + blockSizeThrottler process.BlockSizeThrottler + + hdrsForCurrBlock hdrForBlock mutNotarizedHdrs sync.RWMutex notarizedHdrs mapShardHeaders @@ -112,28 +134,28 @@ func (bp *baseProcessor) checkBlockValidity( return nil } - log.Info(fmt.Sprintf("hash not match: local block hash is %s and node received block with previous hash %s\n", + log.Info(fmt.Sprintf("hash does not match: local block hash is %s and node received block with previous hash %s\n", core.ToB64(chainHandler.GetGenesisHeaderHash()), core.ToB64(headerHandler.GetPrevHash()))) return process.ErrBlockHashDoesNotMatch } - log.Info(fmt.Sprintf("nonce not match: local block nonce is 0 and node received block with nonce %d\n", + log.Info(fmt.Sprintf("nonce does not match: local block nonce is 0 and node received block with nonce %d\n", headerHandler.GetNonce())) return process.ErrWrongNonceInBlock } if headerHandler.GetRound() <= currentBlockHeader.GetRound() { - log.Info(fmt.Sprintf("round not match: local block round is %d and node received block with round %d\n", + log.Info(fmt.Sprintf("round does not match: local block round is %d and node received block with round %d\n", currentBlockHeader.GetRound(), headerHandler.GetRound())) return process.ErrLowerRoundInBlock } if headerHandler.GetNonce() != currentBlockHeader.GetNonce()+1 { - log.Info(fmt.Sprintf("nonce not match: local block nonce is %d and node received block with nonce %d\n", + log.Info(fmt.Sprintf("nonce does not match: local block nonce is %d and node received block with nonce %d\n", currentBlockHeader.GetNonce(), headerHandler.GetNonce())) return process.ErrWrongNonceInBlock @@ -144,20 +166,20 @@ func (bp *baseProcessor) checkBlockValidity( return err } - if !bytes.Equal(headerHandler.GetPrevRandSeed(), currentBlockHeader.GetRandSeed()) { - log.Info(fmt.Sprintf("random seed not match: local block random seed is %s and node received block with previous random seed %s\n", - 
core.ToB64(currentBlockHeader.GetRandSeed()), core.ToB64(headerHandler.GetPrevRandSeed()))) - - return process.ErrRandSeedMismatch - } - if !bytes.Equal(headerHandler.GetPrevHash(), prevHeaderHash) { - log.Info(fmt.Sprintf("hash not match: local block hash is %s and node received block with previous hash %s\n", + log.Info(fmt.Sprintf("hash does not match: local block hash is %s and node received block with previous hash %s\n", core.ToB64(prevHeaderHash), core.ToB64(headerHandler.GetPrevHash()))) return process.ErrBlockHashDoesNotMatch } + if !bytes.Equal(headerHandler.GetPrevRandSeed(), currentBlockHeader.GetRandSeed()) { + log.Info(fmt.Sprintf("random seed does not match: local block random seed is %s and node received block with previous random seed %s\n", + core.ToB64(currentBlockHeader.GetRandSeed()), core.ToB64(headerHandler.GetPrevRandSeed()))) + + return process.ErrRandSeedDoesNotMatch + } + if bodyHandler != nil { // TODO: add bodyHandler verification here } @@ -202,7 +224,7 @@ func (bp *baseProcessor) isHdrConstructionValid(currHdr, prevHdr data.HeaderHand } // block with nonce 0 was already saved if prevHdr.GetRootHash() != nil { - return process.ErrRootStateMissmatch + return process.ErrRootStateDoesNotMatch } return nil } @@ -210,10 +232,14 @@ func (bp *baseProcessor) isHdrConstructionValid(currHdr, prevHdr data.HeaderHand //TODO: add verification if rand seed was correctly computed add other verification //TODO: check here if the 2 header blocks were correctly signed and the consensus group was correctly elected if prevHdr.GetRound() >= currHdr.GetRound() { - return process.ErrLowerRoundInOtherChainBlock + log.Debug(fmt.Sprintf("round does not match in shard %d: local block round is %d and node received block with round %d\n", + currHdr.GetShardID(), prevHdr.GetRound(), currHdr.GetRound())) + return process.ErrLowerRoundInBlock } if currHdr.GetNonce() != prevHdr.GetNonce()+1 { + log.Debug(fmt.Sprintf("nonce does not match in shard %d: local block nonce is %d and node received block with nonce %d\n", + currHdr.GetShardID(), prevHdr.GetNonce(), currHdr.GetNonce())) return process.ErrWrongNonceInBlock } @@ -222,12 +248,16 @@ func (bp *baseProcessor) isHdrConstructionValid(currHdr, prevHdr data.HeaderHand return err } - if !bytes.Equal(currHdr.GetPrevRandSeed(), prevHdr.GetRandSeed()) { - return process.ErrRandSeedMismatch + if !bytes.Equal(currHdr.GetPrevHash(), prevHeaderHash) { + log.Debug(fmt.Sprintf("block hash does not match in shard %d: local block hash is %s and node received block with previous hash %s\n", + currHdr.GetShardID(), core.ToB64(prevHeaderHash), core.ToB64(currHdr.GetPrevHash()))) + return process.ErrBlockHashDoesNotMatch } - if !bytes.Equal(currHdr.GetPrevHash(), prevHeaderHash) { - return process.ErrHashDoesNotMatchInOtherChainBlock + if !bytes.Equal(currHdr.GetPrevRandSeed(), prevHdr.GetRandSeed()) { + log.Debug(fmt.Sprintf("random seed does not match in shard %d: local block random seed is %s and node received block with previous random seed %s\n", + currHdr.GetShardID(), core.ToB64(prevHdr.GetRandSeed()), core.ToB64(currHdr.GetPrevRandSeed()))) + return process.ErrRandSeedDoesNotMatch } return nil @@ -381,10 +411,7 @@ func (bp *baseProcessor) requestHeadersIfMissing(sortedHdrs []data.HeaderHandler return err } - isLastNotarizedCloseToOurRound := maxRound-prevHdr.GetRound() <= process.MaxHeaderRequestsAllowed - if len(sortedHdrs) == 0 && isLastNotarizedCloseToOurRound { - return process.ErrNoSortedHdrsForShard - } + highestHdr := prevHdr missingNonces := 
make([]uint64, 0) for i := 0; i < len(sortedHdrs); i++ { @@ -407,12 +434,15 @@ func (bp *baseProcessor) requestHeadersIfMissing(sortedHdrs []data.HeaderHandler missingNonces = append(missingNonces, j) } } + + highestHdr = currHdr } // ask for headers, if there most probably should be - if len(missingNonces) == 0 && !isLastNotarizedCloseToOurRound { - startNonce := prevHdr.GetNonce() + 1 - for nonce := startNonce; nonce < startNonce+process.MaxHeaderRequestsAllowed; nonce++ { + if maxRound > highestHdr.GetRound() { + nbHeaderRequests := maxRound - highestHdr.GetRound() + startNonce := highestHdr.GetNonce() + 1 + for nonce := startNonce; nonce < startNonce+nbHeaderRequests; nonce++ { missingNonces = append(missingNonces, nonce) } } @@ -482,37 +512,108 @@ func displayHeader(headerHandler data.HeaderHandler) []*display.LineData { } // checkProcessorNilParameters will check the imput parameters for nil values -func checkProcessorNilParameters( - accounts state.AccountsAdapter, - forkDetector process.ForkDetector, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, - store dataRetriever.StorageService, - shardCoordinator sharding.Coordinator, - uint64Converter typeConverters.Uint64ByteSliceConverter, -) error { +func checkProcessorNilParameters(arguments ArgBaseProcessor) error { - if accounts == nil || accounts.IsInterfaceNil() { + if arguments.Accounts == nil || arguments.Accounts.IsInterfaceNil() { return process.ErrNilAccountsAdapter } - if forkDetector == nil || forkDetector.IsInterfaceNil() { + if arguments.ForkDetector == nil || arguments.ForkDetector.IsInterfaceNil() { return process.ErrNilForkDetector } - if hasher == nil || hasher.IsInterfaceNil() { + if arguments.Hasher == nil || arguments.Hasher.IsInterfaceNil() { return process.ErrNilHasher } - if marshalizer == nil || marshalizer.IsInterfaceNil() { + if arguments.Marshalizer == nil || arguments.Marshalizer.IsInterfaceNil() { return process.ErrNilMarshalizer } - if store == nil || store.IsInterfaceNil() { + if arguments.Store == nil || arguments.Store.IsInterfaceNil() { return process.ErrNilStorage } - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + if arguments.ShardCoordinator == nil || arguments.ShardCoordinator.IsInterfaceNil() { return process.ErrNilShardCoordinator } - if uint64Converter == nil || uint64Converter.IsInterfaceNil() { + if arguments.NodesCoordinator == nil || arguments.NodesCoordinator.IsInterfaceNil() { + return process.ErrNilNodesCoordinator + } + if arguments.SpecialAddressHandler == nil || arguments.SpecialAddressHandler.IsInterfaceNil() { + return process.ErrNilSpecialAddressHandler + } + if arguments.Uint64Converter == nil || arguments.Uint64Converter.IsInterfaceNil() { return process.ErrNilUint64Converter } + if arguments.RequestHandler == nil || arguments.RequestHandler.IsInterfaceNil() { + return process.ErrNilRequestHandler + } return nil } + +func (bp *baseProcessor) createBlockStarted() { + bp.resetMissingHdrs() + bp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + bp.hdrsForCurrBlock.hdrHashAndInfo = make(map[string]*hdrInfo) + bp.hdrsForCurrBlock.highestHdrNonce = make(map[uint32]uint64) + bp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() +} + +func (bp *baseProcessor) resetMissingHdrs() { + bp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + bp.hdrsForCurrBlock.missingHdrs = 0 + bp.hdrsForCurrBlock.missingFinalityAttestingHdrs = 0 + bp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() +} + +//TODO: remove bool parameter and give instead the set to sort +func (bp *baseProcessor) 
sortHeadersForCurrentBlockByNonce(usedInBlock bool) map[uint32][]data.HeaderHandler { + hdrsForCurrentBlock := make(map[uint32][]data.HeaderHandler) + + bp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + for _, hdrInfo := range bp.hdrsForCurrBlock.hdrHashAndInfo { + if hdrInfo.usedInBlock != usedInBlock { + continue + } + + hdrsForCurrentBlock[hdrInfo.hdr.GetShardID()] = append(hdrsForCurrentBlock[hdrInfo.hdr.GetShardID()], hdrInfo.hdr) + } + bp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + + // sort headers for each shard + for _, hdrsForShard := range hdrsForCurrentBlock { + process.SortHeadersByNonce(hdrsForShard) + } + + return hdrsForCurrentBlock +} + +//TODO: remove bool parameter and give instead the set to sort +func (bp *baseProcessor) sortHeaderHashesForCurrentBlockByNonce(usedInBlock bool) map[uint32][][]byte { + hdrsForCurrentBlockInfo := make(map[uint32][]*nonceAndHashInfo) + + bp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + for metaBlockHash, hdrInfo := range bp.hdrsForCurrBlock.hdrHashAndInfo { + if hdrInfo.usedInBlock != usedInBlock { + continue + } + + hdrsForCurrentBlockInfo[hdrInfo.hdr.GetShardID()] = append(hdrsForCurrentBlockInfo[hdrInfo.hdr.GetShardID()], + &nonceAndHashInfo{nonce: hdrInfo.hdr.GetNonce(), hash: []byte(metaBlockHash)}) + } + bp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + + for _, hdrsForShard := range hdrsForCurrentBlockInfo { + if len(hdrsForShard) > 1 { + sort.Slice(hdrsForShard, func(i, j int) bool { + return hdrsForShard[i].nonce < hdrsForShard[j].nonce + }) + } + } + + hdrsHashesForCurrentBlock := make(map[uint32][][]byte) + for shardId, hdrsForShard := range hdrsForCurrentBlockInfo { + for _, hdrForShard := range hdrsForShard { + hdrsHashesForCurrentBlock[shardId] = append(hdrsHashesForCurrentBlock[shardId], hdrForShard.hash) + } + } + + return hdrsHashesForCurrentBlock +} diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 92e22f25076..01b5afe86db 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -3,12 +3,14 @@ package block_test import ( "bytes" "errors" + "math/big" "reflect" "testing" "time" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/hashing" @@ -47,68 +49,58 @@ func generateTestUnit() storage.Storer { return storer } +func createShardedDataChacherNotifier( + handler data.TransactionHandler, + testHash []byte, +) func() dataRetriever.ShardedDataCacherNotifier { + return func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, testHash) { + return handler, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return handler, true + } + return nil, false + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + }, + } + } +} + func initDataPool(testHash []byte) 
*mock.PoolsHolderStub { + rewardTx := &rewardTx.RewardTx{ + Round: 1, + Epoch: 0, + Value: big.NewInt(10), + RcvAddr: []byte("receiver"), + ShardId: 0, + } + txCalled := createShardedDataChacherNotifier(&transaction.Transaction{Nonce: 10}, testHash) + unsignedTxCalled := createShardedDataChacherNotifier(&transaction.Transaction{Nonce: 10}, testHash) + rewardTransactionsCalled := createShardedDataChacherNotifier(rewardTx, testHash) + sdp := &mock.PoolsHolderStub{ - TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } - }, - UnsignedTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } - }, + TransactionsCalled: txCalled, + UnsignedTransactionsCalled: unsignedTxCalled, + RewardTransactionsCalled: rewardTransactionsCalled, HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { return &mock.Uint64SyncMapCacherStub{ MergeCalled: func(u uint64, syncMap dataRetriever.ShardIdHashMap) {}, @@ -253,10 +245,11 @@ func initStore() *dataRetriever.ChainStorer { return store } -func createDummyMetaBlock(destShardId uint32, senderShardId uint32, miniBlockHashes ...[]byte) data.HeaderHandler { +func createDummyMetaBlock(destShardId uint32, senderShardId uint32, miniBlockHashes ...[]byte) *block.MetaBlock { metaBlock := &block.MetaBlock{ ShardInfo: []block.ShardData{ { + ShardId: senderShardId, ShardMiniBlockHeaders: make([]block.ShardMiniBlockHeader, len(miniBlockHashes)), }, }, @@ -315,22 +308,31 @@ func (wr *wrongBody) IsInterfaceNil() bool { } func CreateMockArguments() blproc.ArgShardProcessor { + nodesCoordinator := mock.NewNodesCoordinatorMock() + shardCoordinator := mock.NewOneShardCoordinatorMock() + specialAddressHandler := mock.NewSpecialAddressHandlerMock( + &mock.AddressConverterMock{}, + shardCoordinator, + nodesCoordinator, + ) arguments := blproc.ArgShardProcessor{ - ArgBaseProcessor: 
&blproc.ArgBaseProcessor{ - Accounts: &mock.AccountsStub{}, - ForkDetector: &mock.ForkDetectorMock{}, - Hasher: &mock.HasherStub{}, - Marshalizer: &mock.MarshalizerMock{}, - Store: initStore(), - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - StartHeaders: createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - RequestHandler: &mock.RequestHandlerMock{}, - Core: &mock.ServiceContainerMock{}, + ArgBaseProcessor: blproc.ArgBaseProcessor{ + Accounts: &mock.AccountsStub{}, + ForkDetector: &mock.ForkDetectorMock{}, + Hasher: &mock.HasherStub{}, + Marshalizer: &mock.MarshalizerMock{}, + Store: initStore(), + ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, + SpecialAddressHandler: specialAddressHandler, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + StartHeaders: createGenesisBlocks(mock.NewOneShardCoordinatorMock()), + RequestHandler: &mock.RequestHandlerMock{}, + Core: &mock.ServiceContainerMock{}, }, - DataPool: initDataPool([]byte("")), - BlocksTracker: &mock.BlocksTrackerMock{}, - TxCoordinator: &mock.TransactionCoordinatorMock{}, + DataPool: initDataPool([]byte("")), + TxCoordinator: &mock.TransactionCoordinatorMock{}, + TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, } return arguments @@ -374,26 +376,19 @@ func TestBlockProcessor_CheckBlockValidity(t *testing.T) { assert.Equal(t, process.ErrWrongNonceInBlock, err) hdr.Nonce = 2 - hdr.PrevRandSeed = []byte("X") - err = bp.CheckBlockValidity(blkc, hdr, body) - assert.Equal(t, process.ErrRandSeedMismatch, err) - - hdr.PrevRandSeed = []byte("") hdr.PrevHash = []byte("X") err = bp.CheckBlockValidity(blkc, hdr, body) assert.Equal(t, process.ErrBlockHashDoesNotMatch, err) - hdr.Nonce = 3 - hdr.PrevHash = []byte("") - err = bp.CheckBlockValidity(blkc, hdr, body) - assert.Equal(t, process.ErrWrongNonceInBlock, err) - - hdr.Nonce = 2 marshalizerMock := mock.MarshalizerMock{} hasherMock := mock.HasherMock{} prevHeader, _ := marshalizerMock.Marshal(blkc.GetCurrentBlockHeader()) hdr.PrevHash = hasherMock.Compute(string(prevHeader)) + hdr.PrevRandSeed = []byte("X") + err = bp.CheckBlockValidity(blkc, hdr, body) + assert.Equal(t, process.ErrRandSeedDoesNotMatch, err) + hdr.PrevRandSeed = []byte("") err = bp.CheckBlockValidity(blkc, hdr, body) assert.Nil(t, err) } diff --git a/process/block/displayBlock.go b/process/block/displayBlock.go index 085189bb40c..24e8db47eed 100644 --- a/process/block/displayBlock.go +++ b/process/block/displayBlock.go @@ -180,7 +180,10 @@ func (txc *transactionCounter) displayTxBlockBody(lines []*display.LineData, bod for i := 0; i < len(body); i++ { miniBlock := body[i] - part := fmt.Sprintf("MiniBlock_%d->%d", miniBlock.SenderShardID, miniBlock.ReceiverShardID) + part := fmt.Sprintf("%s_MiniBlock_%d->%d", + miniBlock.Type.String(), + miniBlock.SenderShardID, + miniBlock.ReceiverShardID) if miniBlock.TxHashes == nil || len(miniBlock.TxHashes) == 0 { lines = append(lines, display.NewLineData(false, []string{ diff --git a/process/block/export_test.go b/process/block/export_test.go index a00b2d17dcd..6da9619fab4 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -40,58 +40,49 @@ func (sp *shardProcessor) ReceivedMetaBlock(metaBlockHash []byte) { sp.receivedMetaBlock(metaBlockHash) } -func (sp *shardProcessor) CreateMiniBlocks(noShards uint32, maxItemsInBlock uint32, round uint64, haveTime func() bool) (block.Body, error) { - return sp.createMiniBlocks(noShards, maxItemsInBlock, round, haveTime) +func 
(sp *shardProcessor) CreateMiniBlocks(maxItemsInBlock uint32, round uint64, haveTime func() bool) (block.Body, error) { + return sp.createMiniBlocks(maxItemsInBlock, round, haveTime) } -func (sp *shardProcessor) GetProcessedMetaBlocksFromHeader(header *block.Header) ([]data.HeaderHandler, error) { - return sp.getProcessedMetaBlocksFromHeader(header) +func (sp *shardProcessor) GetOrderedProcessedMetaBlocksFromHeader(header *block.Header) ([]data.HeaderHandler, error) { + return sp.getOrderedProcessedMetaBlocksFromHeader(header) } -func (sp *shardProcessor) RemoveProcessedMetablocksFromPool(processedMetaHdrs []data.HeaderHandler) error { - return sp.removeProcessedMetablocksFromPool(processedMetaHdrs) +func (sp *shardProcessor) RemoveProcessedMetaBlocksFromPool(processedMetaHdrs []data.HeaderHandler) error { + return sp.removeProcessedMetaBlocksFromPool(processedMetaHdrs) } func NewShardProcessorEmptyWith3shards(tdp dataRetriever.PoolsHolder, genesisBlocks map[uint32]data.HeaderHandler) (*shardProcessor, error) { - + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + nodesCoordinator := mock.NewNodesCoordinatorMock() + specialAddressHandler := mock.NewSpecialAddressHandlerMock( + &mock.AddressConverterMock{}, + shardCoordinator, + nodesCoordinator, + ) arguments := ArgShardProcessor{ - ArgBaseProcessor: &ArgBaseProcessor{ - Accounts: &mock.AccountsStub{}, - ForkDetector: &mock.ForkDetectorMock{}, - Hasher: &mock.HasherMock{}, - Marshalizer: &mock.MarshalizerMock{}, - Store: &mock.ChainStorerMock{}, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(3), - Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - StartHeaders: genesisBlocks, - RequestHandler: &mock.RequestHandlerMock{}, - Core: &mock.ServiceContainerMock{}, + ArgBaseProcessor: ArgBaseProcessor{ + Accounts: &mock.AccountsStub{}, + ForkDetector: &mock.ForkDetectorMock{}, + Hasher: &mock.HasherMock{}, + Marshalizer: &mock.MarshalizerMock{}, + Store: &mock.ChainStorerMock{}, + ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, + SpecialAddressHandler: specialAddressHandler, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + StartHeaders: genesisBlocks, + RequestHandler: &mock.RequestHandlerMock{}, + Core: &mock.ServiceContainerMock{}, }, - DataPool: tdp, - BlocksTracker: &mock.BlocksTrackerMock{}, - TxCoordinator: &mock.TransactionCoordinatorMock{}, + DataPool: tdp, + TxCoordinator: &mock.TransactionCoordinatorMock{}, + TxsPoolsCleaner: &mock.TxPoolsCleanerMock{}, } shardProcessor, err := NewShardProcessor(arguments) return shardProcessor, err } -func NewMetaProcessorBasicSingleShard(mdp dataRetriever.MetaPoolsHolder, genesisBlocks map[uint32]data.HeaderHandler) (*metaProcessor, error) { - mp, err := NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - genesisBlocks, - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - return mp, err -} - func (mp *metaProcessor) RequestBlockHeaders(header *block.MetaBlock) (uint32, uint32) { return mp.requestShardHeaders(header) } @@ -100,41 +91,36 @@ func (mp *metaProcessor) RemoveBlockInfoFromPool(header *block.MetaBlock) error return mp.removeBlockInfoFromPool(header) } -func (mp *metaProcessor) ReceivedHeader(hdrHash []byte) { - mp.receivedHeader(hdrHash) +func (mp *metaProcessor) ReceivedShardHeader(shardHeaderHash []byte) { + 
mp.receivedShardHeader(shardHeaderHash) } -func (mp *metaProcessor) AddHdrHashToRequestedList(hdrHash []byte) { - mp.mutRequestedShardHdrsHashes.Lock() - defer mp.mutRequestedShardHdrsHashes.Unlock() +func (mp *metaProcessor) AddHdrHashToRequestedList(hdr *block.Header, hdrHash []byte) { + mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + defer mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - if mp.requestedShardHdrsHashes == nil { - mp.requestedShardHdrsHashes = make(map[string]bool) - mp.allNeededShardHdrsFound = true + if mp.hdrsForCurrBlock.hdrHashAndInfo == nil { + mp.hdrsForCurrBlock.hdrHashAndInfo = make(map[string]*hdrInfo) } - if mp.currHighestShardHdrsNonces == nil { - mp.currHighestShardHdrsNonces = make(map[uint32]uint64, mp.shardCoordinator.NumberOfShards()) - for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { - mp.currHighestShardHdrsNonces[i] = uint64(0) - } + if mp.hdrsForCurrBlock.highestHdrNonce == nil { + mp.hdrsForCurrBlock.highestHdrNonce = make(map[uint32]uint64, mp.shardCoordinator.NumberOfShards()) } - mp.requestedShardHdrsHashes[string(hdrHash)] = true - mp.allNeededShardHdrsFound = false -} - -func (mp *metaProcessor) SetCurrHighestShardHdrsNonces(key uint32, value uint64) { - mp.currHighestShardHdrsNonces[key] = value + mp.hdrsForCurrBlock.hdrHashAndInfo[string(hdrHash)] = &hdrInfo{hdr: hdr, usedInBlock: true} + mp.hdrsForCurrBlock.missingHdrs++ } -func (mp *metaProcessor) IsHdrHashRequested(hdrHash []byte) bool { - mp.mutRequestedShardHdrsHashes.Lock() - defer mp.mutRequestedShardHdrsHashes.Unlock() +func (mp *metaProcessor) IsHdrMissing(hdrHash []byte) bool { + mp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + defer mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - _, found := mp.requestedShardHdrsHashes[string(hdrHash)] + hdrInfo, ok := mp.hdrsForCurrBlock.hdrHashAndInfo[string(hdrHash)] + if !ok { + return true + } - return found + return hdrInfo.hdr == nil || hdrInfo.hdr.IsInterfaceNil() } func (mp *metaProcessor) CreateShardInfo(maxItemsInBlock uint32, round uint64, haveTime func() bool) ([]block.ShardData, error) { @@ -145,8 +131,11 @@ func (mp *metaProcessor) ProcessBlockHeaders(header *block.MetaBlock, round uint return mp.processBlockHeaders(header, round, haveTime) } -func (mp *metaProcessor) RequestFinalMissingHeaders() uint32 { - return mp.requestFinalMissingHeaders() +func (mp *metaProcessor) RequestMissingFinalityAttestingHeaders() uint32 { + mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + defer mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() + + return mp.requestMissingFinalityAttestingHeaders() } func (bp *baseProcessor) NotarizedHdrs() map[uint32][]data.HeaderHandler { @@ -169,22 +158,22 @@ func (bp *baseProcessor) SetHasher(hasher hashing.Hasher) { bp.hasher = hasher } -func (mp *metaProcessor) SetNextKValidity(val uint32) { - mp.mutRequestedShardHdrsHashes.Lock() - mp.nextKValidity = val - mp.mutRequestedShardHdrsHashes.Unlock() +func (mp *metaProcessor) SetShardBlockFinality(val uint32) { + mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + mp.shardBlockFinality = val + mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() } func (mp *metaProcessor) SaveLastNotarizedHeader(header *block.MetaBlock) error { return mp.saveLastNotarizedHeader(header) } -func (mp *metaProcessor) CheckShardHeadersValidity(header *block.MetaBlock) (map[uint32]data.HeaderHandler, error) { - return mp.checkShardHeadersValidity(header) +func (mp *metaProcessor) CheckShardHeadersValidity() (map[uint32]data.HeaderHandler, error) { + return mp.checkShardHeadersValidity() } -func (mp 
*metaProcessor) CheckShardHeadersFinality(header *block.MetaBlock, highestNonceHdrs map[uint32]data.HeaderHandler) error { - return mp.checkShardHeadersFinality(header, highestNonceHdrs) +func (mp *metaProcessor) CheckShardHeadersFinality(highestNonceHdrs map[uint32]data.HeaderHandler) error { + return mp.checkShardHeadersFinality(highestNonceHdrs) } func (bp *baseProcessor) IsHdrConstructionValid(currHdr, prevHdr data.HeaderHandler) error { @@ -235,12 +224,15 @@ func (sp *shardProcessor) GetHashAndHdrStruct(header data.HeaderHandler, hash [] return &hashAndHdr{header, hash} } -func (sp *shardProcessor) RequestFinalMissingHeaders() uint32 { - return sp.requestFinalMissingHeaders() +func (sp *shardProcessor) RequestMissingFinalityAttestingHeaders() uint32 { + sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + defer sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() + + return sp.requestMissingFinalityAttestingHeaders() } -func (sp *shardProcessor) CheckMetaHeadersValidityAndFinality(hdr *block.Header) error { - return sp.checkMetaHeadersValidityAndFinality(hdr) +func (sp *shardProcessor) CheckMetaHeadersValidityAndFinality() error { + return sp.checkMetaHeadersValidityAndFinality() } func (sp *shardProcessor) GetOrderedMetaBlocks(round uint64) ([]*hashAndHdr, error) { @@ -248,22 +240,17 @@ func (sp *shardProcessor) GetOrderedMetaBlocks(round uint64) ([]*hashAndHdr, err } func (sp *shardProcessor) CreateAndProcessCrossMiniBlocksDstMe( - noShards uint32, maxItemsInBlock uint32, round uint64, haveTime func() bool, -) (block.MiniBlockSlice, [][]byte, uint32, error) { - return sp.createAndProcessCrossMiniBlocksDstMe(noShards, maxItemsInBlock, round, haveTime) +) (block.MiniBlockSlice, uint32, uint32, error) { + return sp.createAndProcessCrossMiniBlocksDstMe(maxItemsInBlock, round, haveTime) } func (bp *baseProcessor) SetBlockSizeThrottler(blockSizeThrottler process.BlockSizeThrottler) { bp.blockSizeThrottler = blockSizeThrottler } -func (sp *shardProcessor) SetCurrHighestMetaHdrNonce(value uint64) { - sp.currHighestMetaHdrNonce = value -} - func (sp *shardProcessor) DisplayLogInfo( header *block.Header, body block.Body, @@ -287,8 +274,39 @@ func (sp *shardProcessor) RestoreMetaBlockIntoPool( } func (sp *shardProcessor) GetAllMiniBlockDstMeFromMeta( - round uint64, - metaHashes [][]byte, + header *block.Header, ) (map[string][]byte, error) { - return sp.getAllMiniBlockDstMeFromMeta(round, metaHashes) + return sp.getAllMiniBlockDstMeFromMeta(header) +} + +func (sp *shardProcessor) IsMiniBlockProcessed(metaBlockHash []byte, miniBlockHash []byte) bool { + return sp.isMiniBlockProcessed(metaBlockHash, miniBlockHash) +} + +func (sp *shardProcessor) AddProcessedMiniBlock(metaBlockHash []byte, miniBlockHash []byte) { + sp.addProcessedMiniBlock(metaBlockHash, miniBlockHash) +} + +func (bp *baseProcessor) SetHdrForCurrentBlock(headerHash []byte, headerHandler data.HeaderHandler, usedInBlock bool) { + bp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + bp.hdrsForCurrBlock.hdrHashAndInfo[string(headerHash)] = &hdrInfo{hdr: headerHandler, usedInBlock: usedInBlock} + bp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() +} + +func (bp *baseProcessor) SetHighestHdrNonceForCurrentBlock(shardId uint32, value uint64) { + bp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + bp.hdrsForCurrBlock.highestHdrNonce[shardId] = value + bp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() +} + +func (bp *baseProcessor) CreateBlockStarted() { + bp.createBlockStarted() +} + +func (sp *shardProcessor) CreateBlockStarted() { + sp.createBlockStarted() +} + +func (sp 
*shardProcessor) AddProcessedCrossMiniBlocksFromHeader(header *block.Header) error { + return sp.addProcessedCrossMiniBlocksFromHeader(header) } diff --git a/process/block/interceptedBlockHeader.go b/process/block/interceptedBlockHeader.go index 5af3b9aea39..b1493158f5b 100644 --- a/process/block/interceptedBlockHeader.go +++ b/process/block/interceptedBlockHeader.go @@ -1,8 +1,11 @@ package block import ( + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" ) @@ -11,21 +14,27 @@ import ( // It implements Newer and Hashed interfaces type InterceptedHeader struct { *block.Header - multiSigVerifier crypto.MultiSigVerifier - chronologyValidator process.ChronologyValidator - hash []byte + multiSigVerifier crypto.MultiSigVerifier + hash []byte + nodesCoordinator sharding.NodesCoordinator + marshalizer marshal.Marshalizer + hasher hashing.Hasher } // NewInterceptedHeader creates a new instance of InterceptedHeader struct func NewInterceptedHeader( multiSigVerifier crypto.MultiSigVerifier, - chronologyValidator process.ChronologyValidator, + nodesCoordinator sharding.NodesCoordinator, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, ) *InterceptedHeader { return &InterceptedHeader{ - Header: &block.Header{}, - multiSigVerifier: multiSigVerifier, - chronologyValidator: chronologyValidator, + Header: &block.Header{}, + multiSigVerifier: multiSigVerifier, + nodesCoordinator: nodesCoordinator, + marshalizer: marshalizer, + hasher: hasher, } } @@ -61,7 +70,7 @@ func (inHdr *InterceptedHeader) IntegrityAndValidity(coordinator sharding.Coordi return err } - return inHdr.validityCheck() + return nil } // Integrity checks the integrity of the state block wrapper @@ -106,25 +115,49 @@ func (inHdr *InterceptedHeader) Integrity(coordinator sharding.Coordinator) erro } } -func (inHdr *InterceptedHeader) validityCheck() error { - if inHdr.chronologyValidator == nil { - return process.ErrNilChronologyValidator +// VerifySig verifies the intercepted Header block signature +func (inHdr *InterceptedHeader) VerifySig() error { + randSeed := inHdr.GetPrevRandSeed() + bitmap := inHdr.GetPubKeysBitmap() + + if len(bitmap) == 0 { + return process.ErrNilPubKeysBitmap } - return inHdr.chronologyValidator.ValidateReceivedBlock( - inHdr.ShardId, - inHdr.Epoch, - inHdr.Nonce, - inHdr.Round, - ) -} + if bitmap[0]&1 == 0 { + return process.ErrBlockProposerSignatureMissing -// VerifySig verifies a signature -func (inHdr *InterceptedHeader) VerifySig() error { - // TODO: Check block signature after multisig will be implemented - // TODO: the interceptors do not have access yet to consensus group selection to validate multisigs - // TODO: verify that the block proposer is among the signers and in the bitmap - return nil + } + + consensusPubKeys, err := inHdr.nodesCoordinator.GetValidatorsPublicKeys(randSeed, inHdr.Round, inHdr.ShardId) + if err != nil { + return err + } + + verifier, err := inHdr.multiSigVerifier.Create(consensusPubKeys, 0) + if err != nil { + return err + } + + err = verifier.SetAggregatedSig(inHdr.Signature) + if err != nil { + return err + } + + // get marshalled block header without signature and bitmap + // as this is the message that was signed + headerCopy := *inHdr.Header + headerCopy.Signature = nil + headerCopy.PubKeysBitmap = 
nil + + hash, err := core.CalculateHash(inHdr.marshalizer, inHdr.hasher, headerCopy) + if err != nil { + return err + } + + err = verifier.Verify(hash, bitmap) + + return err } func (inHdr *InterceptedHeader) validatePeerBlock() error { diff --git a/process/block/interceptedBlockHeader_test.go b/process/block/interceptedBlockHeader_test.go index 92b867e4ccc..dc99d3b8ea5 100644 --- a/process/block/interceptedBlockHeader_test.go +++ b/process/block/interceptedBlockHeader_test.go @@ -11,13 +11,12 @@ import ( ) func createTestInterceptedHeader() *block.InterceptedHeader { + return block.NewInterceptedHeader( mock.NewMultiSigner(), - &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - }, + &mock.NodesCoordinatorMock{}, + &mock.MarshalizerMock{Fail: false}, + mock.HasherMock{}, ) } @@ -245,24 +244,6 @@ func TestInterceptedHeader_IntegrityAndValidityIntegrityDoesNotPassShouldErr(t * assert.Equal(t, process.ErrNilPubKeysBitmap, hdr.IntegrityAndValidity(mock.NewOneShardCoordinatorMock())) } -func TestInterceptedHeader_IntegrityAndValidityNilChronologyValidatorShouldErr(t *testing.T) { - t.Parallel() - - hdr := block.NewInterceptedHeader( - mock.NewMultiSigner(), - nil, - ) - hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) - hdr.BlockBodyType = block2.PeerBlock - hdr.Signature = make([]byte, 0) - hdr.RootHash = make([]byte, 0) - hdr.PrevRandSeed = make([]byte, 0) - hdr.RandSeed = make([]byte, 0) - - assert.Equal(t, process.ErrNilChronologyValidator, hdr.IntegrityAndValidity(mock.NewOneShardCoordinatorMock())) -} - func TestInterceptedHeader_IntegrityAndValidityOkValsShouldWork(t *testing.T) { t.Parallel() @@ -283,7 +264,7 @@ func TestInterceptedHeader_VerifySigOkValsShouldWork(t *testing.T) { hdr := createTestInterceptedHeader() hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = block2.PeerBlock hdr.Signature = make([]byte, 0) hdr.RootHash = make([]byte, 0) diff --git a/process/block/interceptedMetaBlockHeader.go b/process/block/interceptedMetaBlockHeader.go index a73503238ee..b0fbcdf0227 100644 --- a/process/block/interceptedMetaBlockHeader.go +++ b/process/block/interceptedMetaBlockHeader.go @@ -1,8 +1,11 @@ package block import ( + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" ) @@ -11,21 +14,27 @@ import ( // It implements Newer and Hashed interfaces type InterceptedMetaHeader struct { *block.MetaBlock - multiSigVerifier crypto.MultiSigVerifier - chronologyValidator process.ChronologyValidator - hash []byte + multiSigVerifier crypto.MultiSigVerifier + hash []byte + nodesCoordinator sharding.NodesCoordinator + marshalizer marshal.Marshalizer + hasher hashing.Hasher } // NewInterceptedHeader creates a new instance of InterceptedHeader struct func NewInterceptedMetaHeader( multiSigVerifier crypto.MultiSigVerifier, - chronologyValidator process.ChronologyValidator, + nodesCoordinator sharding.NodesCoordinator, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, ) *InterceptedMetaHeader { return &InterceptedMetaHeader{ - MetaBlock: &block.MetaBlock{}, - multiSigVerifier: multiSigVerifier, - chronologyValidator: chronologyValidator, + 
MetaBlock: &block.MetaBlock{}, + multiSigVerifier: multiSigVerifier, + nodesCoordinator: nodesCoordinator, + marshalizer: marshalizer, + hasher: hasher, } } @@ -51,7 +60,7 @@ func (imh *InterceptedMetaHeader) IntegrityAndValidity(coordinator sharding.Coor return err } - return imh.validityCheck() + return nil } // Integrity checks the integrity of the state block wrapper @@ -98,30 +107,54 @@ func (imh *InterceptedMetaHeader) Integrity(coordinator sharding.Coordinator) er return nil } -func (imh *InterceptedMetaHeader) validityCheck() error { - if imh.chronologyValidator == nil || imh.chronologyValidator.IsInterfaceNil() { - return process.ErrNilChronologyValidator +// VerifySig verifies a signature +func (imh *InterceptedMetaHeader) VerifySig() error { + randSeed := imh.GetPrevRandSeed() + bitmap := imh.GetPubKeysBitmap() + + if len(bitmap) == 0 { + return process.ErrNilPubKeysBitmap } - return imh.chronologyValidator.ValidateReceivedBlock( - sharding.MetachainShardId, - imh.Epoch, - imh.Nonce, - imh.Round, - ) -} + if bitmap[0]&1 == 0 { + return process.ErrBlockProposerSignatureMissing -// VerifySig verifies a signature -func (imh *InterceptedMetaHeader) VerifySig() error { - // TODO: Check block signature after multisig will be implemented - // TODO: the interceptors do not have access yet to consensus group selection to validate multisigs - // TODO: verify that the block proposer is among the signers - return nil + } + + consensusPubKeys, err := imh.nodesCoordinator.GetValidatorsPublicKeys(randSeed, imh.Round, imh.GetShardID()) + if err != nil { + return err + } + + verifier, err := imh.multiSigVerifier.Create(consensusPubKeys, 0) + if err != nil { + return err + } + + err = verifier.SetAggregatedSig(imh.Signature) + if err != nil { + return err + } + + // get marshalled block header without signature and bitmap + // as this is the message that was signed + headerCopy := *imh.MetaBlock + headerCopy.Signature = nil + headerCopy.PubKeysBitmap = nil + + hash, err := core.CalculateHash(imh.marshalizer, imh.hasher, headerCopy) + if err != nil { + return err + } + + err = verifier.Verify(hash, bitmap) + + return err } // IsInterfaceNil returns true if there is no value under the interface -func (imh *InterceptedMetaHeader) IsInterfaceNil() bool { - if imh == nil { +func (mb *InterceptedMetaHeader) IsInterfaceNil() bool { + if mb == nil { return true } return false diff --git a/process/block/interceptedMetaBlockHeader_test.go b/process/block/interceptedMetaBlockHeader_test.go index fdb2bc0af63..43dd2810320 100644 --- a/process/block/interceptedMetaBlockHeader_test.go +++ b/process/block/interceptedMetaBlockHeader_test.go @@ -13,11 +13,9 @@ import ( func createTestInterceptedMetaHeader() *block.InterceptedMetaHeader { return block.NewInterceptedMetaHeader( mock.NewMultiSigner(), - &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - }, + &mock.NodesCoordinatorMock{}, + &mock.MarshalizerMock{Fail: false}, + mock.HasherMock{}, ) } @@ -283,23 +281,6 @@ func TestInterceptedMetaHeader_IntegrityAndValidityIntegrityDoesNotPassShouldErr assert.Equal(t, process.ErrNilPubKeysBitmap, hdr.IntegrityAndValidity(mock.NewOneShardCoordinatorMock())) } -func TestInterceptedMetaHeader_IntegrityAndValidityNilChronologyValidatorShouldErr(t *testing.T) { - t.Parallel() - - hdr := block.NewInterceptedMetaHeader( - mock.NewMultiSigner(), - nil, - ) - hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) - 
hdr.Signature = make([]byte, 0) - hdr.RootHash = make([]byte, 0) - hdr.PrevRandSeed = make([]byte, 0) - hdr.RandSeed = make([]byte, 0) - - assert.Equal(t, process.ErrNilChronologyValidator, hdr.IntegrityAndValidity(mock.NewOneShardCoordinatorMock())) -} - func TestInterceptedMetaHeader_IntegrityAndValidityOkValsShouldWork(t *testing.T) { t.Parallel() @@ -319,7 +300,7 @@ func TestInterceptedMetaHeader_VerifySigOkValsShouldWork(t *testing.T) { hdr := createTestInterceptedMetaHeader() hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1, 0, 0} hdr.Signature = make([]byte, 0) hdr.RootHash = make([]byte, 0) hdr.PrevRandSeed = make([]byte, 0) diff --git a/process/block/interceptors/headerInterceptor.go b/process/block/interceptors/headerInterceptor.go index 6f41648e07f..10274db0c17 100644 --- a/process/block/interceptors/headerInterceptor.go +++ b/process/block/interceptors/headerInterceptor.go @@ -15,15 +15,15 @@ import ( // HeaderInterceptor represents an interceptor used for block headers type HeaderInterceptor struct { - marshalizer marshal.Marshalizer - storer storage.Storer - multiSigVerifier crypto.MultiSigVerifier - hasher hashing.Hasher - chronologyValidator process.ChronologyValidator - headers storage.Cacher - headersNonces dataRetriever.Uint64SyncMapCacher - headerValidator process.HeaderValidator - shardCoordinator sharding.Coordinator + marshalizer marshal.Marshalizer + storer storage.Storer + multiSigVerifier crypto.MultiSigVerifier + hasher hashing.Hasher + headers storage.Cacher + headersNonces dataRetriever.Uint64SyncMapCacher + headerValidator process.HeaderValidator + shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator } // NewHeaderInterceptor hooks a new interceptor for block headers @@ -36,7 +36,7 @@ func NewHeaderInterceptor( multiSigVerifier crypto.MultiSigVerifier, hasher hashing.Hasher, shardCoordinator sharding.Coordinator, - chronologyValidator process.ChronologyValidator, + nodesCoordinator sharding.NodesCoordinator, ) (*HeaderInterceptor, error) { if marshalizer == nil || marshalizer.IsInterfaceNil() { @@ -60,19 +60,19 @@ func NewHeaderInterceptor( if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { return nil, process.ErrNilShardCoordinator } - if chronologyValidator == nil || chronologyValidator.IsInterfaceNil() { - return nil, process.ErrNilChronologyValidator + if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { + return nil, process.ErrNilNodesCoordinator } hdrInterceptor := &HeaderInterceptor{ - marshalizer: marshalizer, - multiSigVerifier: multiSigVerifier, - hasher: hasher, - shardCoordinator: shardCoordinator, - chronologyValidator: chronologyValidator, - headers: headers, - headersNonces: headersNonces, - headerValidator: headerValidator, + marshalizer: marshalizer, + multiSigVerifier: multiSigVerifier, + hasher: hasher, + shardCoordinator: shardCoordinator, + headers: headers, + headersNonces: headersNonces, + headerValidator: headerValidator, + nodesCoordinator: nodesCoordinator, } return hdrInterceptor, nil @@ -88,7 +88,7 @@ func (hi *HeaderInterceptor) ParseReceivedMessage(message p2p.MessageP2P) (*bloc return nil, process.ErrNilDataToProcess } - hdrIntercepted := block.NewInterceptedHeader(hi.multiSigVerifier, hi.chronologyValidator) + hdrIntercepted := block.NewInterceptedHeader(hi.multiSigVerifier, hi.nodesCoordinator, hi.marshalizer, hi.hasher) err := hi.marshalizer.Unmarshal(hdrIntercepted, message.Data()) if err != nil { return nil, err diff 
--git a/process/block/interceptors/headerInterceptor_test.go b/process/block/interceptors/headerInterceptor_test.go index 7e5d84b9e9f..87fd3898754 100644 --- a/process/block/interceptors/headerInterceptor_test.go +++ b/process/block/interceptors/headerInterceptor_test.go @@ -3,6 +3,8 @@ package interceptors_test import ( "bytes" "errors" + "fmt" + "math/big" "sync" "testing" "time" @@ -14,11 +16,38 @@ import ( "github.com/ElrondNetwork/elrond-go/process/block" "github.com/ElrondNetwork/elrond-go/process/block/interceptors" "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/sharding" "github.com/stretchr/testify/assert" ) var durTimeout = time.Second +func generateValidatorsMap(shardSize, metachainSize, nbShards uint32) map[uint32][]sharding.Validator { + nodes := make(map[uint32][]sharding.Validator) + + for shard := uint32(0); shard < nbShards; shard++ { + shardNodes := make([]sharding.Validator, 0) + for valIdx := uint32(0); valIdx < shardSize; valIdx++ { + pk := fmt.Sprintf("pubKey_sh%d_node%d", shard, valIdx) + addr := fmt.Sprintf("address_sh%d_node%d", shard, valIdx) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(pk), []byte(addr)) + shardNodes = append(shardNodes, v) + } + nodes[shard] = shardNodes + } + + metaNodes := make([]sharding.Validator, 0) + for mValIdx := uint32(0); mValIdx < metachainSize; mValIdx++ { + pk := fmt.Sprintf("pubKey_meta_node%d", mValIdx) + addr := fmt.Sprintf("address_meta_node%d", mValIdx) + v, _ := sharding.NewValidator(big.NewInt(0), 1, []byte(pk), []byte(addr)) + metaNodes = append(metaNodes, v) + } + nodes[sharding.MetachainShardId] = metaNodes + + return nodes +} + //------- NewHeaderInterceptor func TestNewHeaderInterceptor_NilMarshalizerShouldErr(t *testing.T) { @@ -36,7 +65,7 @@ func TestNewHeaderInterceptor_NilMarshalizerShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -57,7 +86,7 @@ func TestNewHeaderInterceptor_NilHeadersShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilHeadersDataPool, err) @@ -78,7 +107,7 @@ func TestNewHeaderInterceptor_NilHeadersNoncesShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilHeadersNoncesDataPool, err) @@ -99,7 +128,7 @@ func TestNewHeaderInterceptor_NilHeaderHandlerValidatorShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) assert.Equal(t, process.ErrNilHeaderHandlerValidator, err) @@ -121,7 +150,7 @@ func TestNewHeaderInterceptor_NilMultiSignerShouldErr(t *testing.T) { nil, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) assert.Equal(t, process.ErrNilMultiSigVerifier, err) @@ -143,7 +172,7 @@ func TestNewHeaderInterceptor_NilHasherShouldErr(t *testing.T) { mock.NewMultiSigner(), nil, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) assert.Equal(t, process.ErrNilHasher, err) @@ -165,14 +194,14 @@ func 
TestNewHeaderInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, nil, - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) assert.Equal(t, process.ErrNilShardCoordinator, err) assert.Nil(t, hi) } -func TestNewHeaderInterceptor_NilChronologyValidatorShouldErr(t *testing.T) { +func TestNewHeaderInterceptor_NilNodesCoordinatorShouldErr(t *testing.T) { t.Parallel() headers := &mock.CacherStub{} @@ -190,7 +219,7 @@ func TestNewHeaderInterceptor_NilChronologyValidatorShouldErr(t *testing.T) { nil, ) - assert.Equal(t, process.ErrNilChronologyValidator, err) + assert.Equal(t, process.ErrNilNodesCoordinator, err) assert.Nil(t, hi) } @@ -209,7 +238,7 @@ func TestNewHeaderInterceptor_OkValsShouldWork(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Nil(t, err) @@ -234,7 +263,7 @@ func TestHeaderInterceptor_ParseReceivedMessageNilMessageShouldErr(t *testing.T) mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) hdr, err := hi.ParseReceivedMessage(nil) @@ -258,7 +287,7 @@ func TestHeaderInterceptor_ParseReceivedMessageNilDataToProcessShouldErr(t *test mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) msg := &mock.P2PMessageMock{} @@ -289,7 +318,7 @@ func TestHeaderInterceptor_ParseReceivedMessageMarshalizerErrorsAtUnmarshalingSh mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + &mock.NodesCoordinatorMock{}, ) msg := &mock.P2PMessageMock{ @@ -309,11 +338,9 @@ func TestHeaderInterceptor_ParseReceivedMessageSanityCheckFailedShouldErr(t *tes multisigner := mock.NewMultiSigner() headers := &mock.CacherStub{} headersNonces := &mock.Uint64SyncMapCacherStub{} - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } + + nodesCoordinator := &mock.NodesCoordinatorMock{} + hasher := mock.HasherMock{} hi, _ := interceptors.NewHeaderInterceptor( marshalizer, @@ -321,12 +348,12 @@ func TestHeaderInterceptor_ParseReceivedMessageSanityCheckFailedShouldErr(t *tes headersNonces, headerValidator, multisigner, - mock.HasherMock{}, + hasher, mock.NewOneShardCoordinatorMock(), - chronologyValidator, + nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, hasher) buff, _ := marshalizer.Marshal(hdr) msg := &mock.P2PMessageMock{ DataField: buff, @@ -352,11 +379,9 @@ func TestHeaderInterceptor_ParseReceivedMessageValsOkShouldWork(t *testing.T) { multisigner := mock.NewMultiSigner() headers := &mock.CacherStub{} headersNonces := &mock.Uint64SyncMapCacherStub{} - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } + + nodesCoordinator := &mock.NodesCoordinatorMock{} + hasher := mock.HasherMock{} hi, _ := interceptors.NewHeaderInterceptor( marshalizer, @@ -364,16 +389,16 @@ func TestHeaderInterceptor_ParseReceivedMessageValsOkShouldWork(t *testing.T) { headersNonces, headerValidator, multisigner, - mock.HasherMock{}, + hasher, 
mock.NewOneShardCoordinatorMock(), - chronologyValidator, + nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = dataBlock.TxBlock hdr.Signature = make([]byte, 0) hdr.SetHash([]byte("aaa")) @@ -414,7 +439,7 @@ func TestHeaderInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *testing. mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilMessage, hi.ProcessReceivedMessage(nil)) @@ -430,12 +455,8 @@ func TestHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testing.T) marshalizer := &mock.MarshalizerMock{} headers := &mock.CacherStub{} + hasher := mock.HasherMock{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } headersNonces := &mock.Uint64SyncMapCacherStub{} headersNonces.MergeCalled = func(nonce uint64, src dataRetriever.ShardIdHashMap) { if nonce == testedNonce { @@ -449,6 +470,10 @@ func TestHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testing.T) }, } + nodesCoordinator := mock.NewNodesCoordinatorMock() + nodes := generateValidatorsMap(3, 3, 1) + _ = nodesCoordinator.SetNodesPerShards(nodes) + hi, _ := interceptors.NewHeaderInterceptor( marshalizer, headers, @@ -457,14 +482,14 @@ func TestHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *testing.T) multisigner, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - chronologyValidator, + nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = dataBlock.TxBlock hdr.Signature = make([]byte, 0) hdr.SetHash([]byte("aaa")) @@ -507,11 +532,6 @@ func TestHeaderInterceptor_ProcessReceivedMessageTestHdrNonces(t *testing.T) { testedNonce := uint64(67) headers := &mock.CacherStub{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } headerValidator := &mock.HeaderValidatorStub{ IsHeaderValidForProcessingCalled: func(headerHandler data.HeaderHandler) bool { @@ -520,6 +540,8 @@ func TestHeaderInterceptor_ProcessReceivedMessageTestHdrNonces(t *testing.T) { } hdrsNonces := &mock.Uint64SyncMapCacherStub{} + nodesCoordinator := &mock.NodesCoordinatorMock{} + hasher := mock.HasherMock{} hi, _ := interceptors.NewHeaderInterceptor( marshalizer, @@ -527,16 +549,16 @@ func TestHeaderInterceptor_ProcessReceivedMessageTestHdrNonces(t *testing.T) { hdrsNonces, headerValidator, multisigner, - mock.HasherMock{}, - mock.NewMultiShardsCoordinatorMock(2), - chronologyValidator, + hasher, + mock.NewOneShardCoordinatorMock(), + nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = 
testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = dataBlock.TxBlock hdr.Signature = make([]byte, 0) hdr.SetHash([]byte("aaa")) @@ -576,12 +598,8 @@ func TestHeaderInterceptor_ProcessReceivedMessageIsNotValidShouldNotAdd(t *testi marshalizer := &mock.MarshalizerMock{} headers := &mock.CacherStub{} + hasher := mock.HasherMock{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } headersNonces := &mock.Uint64SyncMapCacherStub{} headersNonces.MergeCalled = func(nonce uint64, src dataRetriever.ShardIdHashMap) { if nonce == testedNonce { @@ -595,6 +613,10 @@ func TestHeaderInterceptor_ProcessReceivedMessageIsNotValidShouldNotAdd(t *testi }, } + nodesCoordinator := mock.NewNodesCoordinatorMock() + nodes := generateValidatorsMap(3, 3, 1) + _ = nodesCoordinator.SetNodesPerShards(nodes) + hi, _ := interceptors.NewHeaderInterceptor( marshalizer, headers, @@ -603,14 +625,14 @@ func TestHeaderInterceptor_ProcessReceivedMessageIsNotValidShouldNotAdd(t *testi multisigner, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - chronologyValidator, + nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = dataBlock.TxBlock hdr.Signature = make([]byte, 0) hdr.RootHash = make([]byte, 0) @@ -648,12 +670,8 @@ func TestHeaderInterceptor_ProcessReceivedMessageNotForCurrentShardShouldNotAdd( marshalizer := &mock.MarshalizerMock{} headers := &mock.CacherStub{} + hasher := mock.HasherMock{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } headersNonces := &mock.Uint64SyncMapCacherStub{} headersNonces.MergeCalled = func(nonce uint64, src dataRetriever.ShardIdHashMap) { if nonce == testedNonce { @@ -670,6 +688,16 @@ func TestHeaderInterceptor_ProcessReceivedMessageNotForCurrentShardShouldNotAdd( shardCoordinator.CurrentShard = 2 shardCoordinator.SetNoShards(5) + nodesCoordinator := &mock.NodesCoordinatorMock{ + NbShards: 5, + ShardConsensusSize: 1, + MetaConsensusSize: 1, + ShardId: 2, + } + + nodes := generateValidatorsMap(3, 3, 5) + _ = nodesCoordinator.SetNodesPerShards(nodes) + hi, _ := interceptors.NewHeaderInterceptor( marshalizer, headers, @@ -678,14 +706,14 @@ func TestHeaderInterceptor_ProcessReceivedMessageNotForCurrentShardShouldNotAdd( multisigner, mock.HasherMock{}, shardCoordinator, - chronologyValidator, + nodesCoordinator, ) - hdr := block.NewInterceptedHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.ShardId = 0 hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1} hdr.BlockBodyType = dataBlock.TxBlock hdr.Signature = make([]byte, 0) hdr.RootHash = make([]byte, 0) @@ -708,5 +736,4 @@ func TestHeaderInterceptor_ProcessReceivedMessageNotForCurrentShardShouldNotAdd( } assert.Nil(t, hi.ProcessReceivedMessage(msg)) - } diff --git 
a/process/block/interceptors/metachainHeaderInterceptor.go b/process/block/interceptors/metachainHeaderInterceptor.go index 69979922869..af4f382b350 100644 --- a/process/block/interceptors/metachainHeaderInterceptor.go +++ b/process/block/interceptors/metachainHeaderInterceptor.go @@ -23,7 +23,7 @@ type MetachainHeaderInterceptor struct { multiSigVerifier crypto.MultiSigVerifier hasher hashing.Hasher shardCoordinator sharding.Coordinator - chronologyValidator process.ChronologyValidator + nodesCoordinator sharding.NodesCoordinator } // NewMetachainHeaderInterceptor hooks a new interceptor for metachain block headers @@ -36,7 +36,7 @@ func NewMetachainHeaderInterceptor( multiSigVerifier crypto.MultiSigVerifier, hasher hashing.Hasher, shardCoordinator sharding.Coordinator, - chronologyValidator process.ChronologyValidator, + nodesCoordinator sharding.NodesCoordinator, ) (*MetachainHeaderInterceptor, error) { if marshalizer == nil || marshalizer.IsInterfaceNil() { @@ -60,8 +60,8 @@ func NewMetachainHeaderInterceptor( if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { return nil, process.ErrNilShardCoordinator } - if chronologyValidator == nil || chronologyValidator.IsInterfaceNil() { - return nil, process.ErrNilChronologyValidator + if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { + return nil, process.ErrNilNodesCoordinator } return &MetachainHeaderInterceptor{ @@ -72,7 +72,7 @@ func NewMetachainHeaderInterceptor( multiSigVerifier: multiSigVerifier, hasher: hasher, shardCoordinator: shardCoordinator, - chronologyValidator: chronologyValidator, + nodesCoordinator: nodesCoordinator, metachainHeadersNonces: metachainHeadersNonces, }, nil } @@ -85,7 +85,12 @@ func (mhi *MetachainHeaderInterceptor) ProcessReceivedMessage(message p2p.Messag return err } - metaHdrIntercepted := block.NewInterceptedMetaHeader(mhi.multiSigVerifier, mhi.chronologyValidator) + metaHdrIntercepted := block.NewInterceptedMetaHeader( + mhi.multiSigVerifier, + mhi.nodesCoordinator, + mhi.marshalizer, + mhi.hasher, + ) err = mhi.marshalizer.Unmarshal(metaHdrIntercepted, message.Data()) if err != nil { return err diff --git a/process/block/interceptors/metachainHeaderInterceptor_test.go b/process/block/interceptors/metachainHeaderInterceptor_test.go index ea36ba5be05..7803d21b516 100644 --- a/process/block/interceptors/metachainHeaderInterceptor_test.go +++ b/process/block/interceptors/metachainHeaderInterceptor_test.go @@ -32,7 +32,7 @@ func TestNewMetachainHeaderInterceptor_NilMarshalizerShouldErr(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -52,7 +52,7 @@ func TestNewMetachainHeaderInterceptor_NilMetachainHeadersShouldErr(t *testing.T mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilMetaHeadersDataPool, err) @@ -72,7 +72,7 @@ func TestNewMetachainHeaderInterceptor_NilMetachainHeadersNoncesShouldErr(t *tes mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilMetaHeadersNoncesDataPool, err) @@ -92,7 +92,7 @@ func TestNewMetachainHeaderInterceptor_NilMetaHeaderValidatorShouldErr(t *testin mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - 
&mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilHeaderHandlerValidator, err) @@ -113,7 +113,7 @@ func TestNewMetachainHeaderInterceptor_NilMultiSignerShouldErr(t *testing.T) { nil, mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Nil(t, mhi) @@ -134,7 +134,7 @@ func TestNewMetachainHeaderInterceptor_NilHasherShouldErr(t *testing.T) { mock.NewMultiSigner(), nil, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilHasher, err) @@ -155,13 +155,34 @@ func TestNewMetachainHeaderInterceptor_NilShardCoordinatorShouldErr(t *testing.T mock.NewMultiSigner(), mock.HasherMock{}, nil, - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilShardCoordinator, err) assert.Nil(t, mhi) } +func TestNewMetachainHeaderInterceptor_NilNodesCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + metachainHeaders := &mock.CacherStub{} + headerValidator := &mock.HeaderValidatorStub{} + + mhi, err := interceptors.NewMetachainHeaderInterceptor( + &mock.MarshalizerMock{}, + metachainHeaders, + &mock.Uint64SyncMapCacherStub{}, + headerValidator, + mock.NewMultiSigner(), + mock.HasherMock{}, + mock.NewOneShardCoordinatorMock(), + nil, + ) + + assert.Equal(t, process.ErrNilNodesCoordinator, err) + assert.Nil(t, mhi) +} + func TestNewMetachainHeaderInterceptor_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -176,7 +197,7 @@ func TestNewMetachainHeaderInterceptor_OkValsShouldWork(t *testing.T) { mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Nil(t, err) @@ -199,7 +220,7 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageNilMessageShouldErr(t mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) assert.Equal(t, process.ErrNilMessage, mhi.ProcessReceivedMessage(nil)) @@ -219,7 +240,7 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageNilDataToProcessShould mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) msg := &mock.P2PMessageMock{} @@ -246,7 +267,7 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageMarshalizerErrorsAtUnm mock.NewMultiSigner(), mock.HasherMock{}, mock.NewOneShardCoordinatorMock(), - &mock.ChronologyValidatorStub{}, + mock.NewNodesCoordinatorMock(), ) msg := &mock.P2PMessageMock{ @@ -262,24 +283,22 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageSanityCheckFailedShoul metachainHeaders := &mock.CacherStub{} headerValidator := &mock.HeaderValidatorStub{} marshalizer := &mock.MarshalizerMock{} + hasher := mock.HasherMock{} multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } + nodesCoordinator := mock.NewNodesCoordinatorMock() + mhi, _ := interceptors.NewMetachainHeaderInterceptor( marshalizer, metachainHeaders, &mock.Uint64SyncMapCacherStub{}, headerValidator, multisigner, - mock.HasherMock{}, + hasher, mock.NewOneShardCoordinatorMock(), - chronologyValidator, + nodesCoordinator, ) - hdr := block.NewInterceptedMetaHeader(multisigner, 
chronologyValidator) + hdr := block.NewInterceptedMetaHeader(multisigner, nodesCoordinator, marshalizer, hasher) buff, _ := marshalizer.Marshal(hdr) msg := &mock.P2PMessageMock{ DataField: buff, @@ -292,6 +311,7 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *te t.Parallel() marshalizer := &mock.MarshalizerMock{} + hasher := mock.HasherMock{} chanDone := make(chan struct{}, 1) testedNonce := uint64(67) metachainHeaders := &mock.CacherStub{} @@ -302,26 +322,23 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageValsOkShouldWork(t *te }, } multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } + nodesCoordinator := &mock.NodesCoordinatorMock{} + mhi, _ := interceptors.NewMetachainHeaderInterceptor( marshalizer, metachainHeaders, metachainHeadersNonces, headerValidator, multisigner, - mock.HasherMock{}, + hasher, mock.NewOneShardCoordinatorMock(), - chronologyValidator, + nodesCoordinator, ) - hdr := block.NewInterceptedMetaHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedMetaHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1, 0, 0} hdr.Signature = make([]byte, 0) hdr.SetHash([]byte("aaa")) hdr.RootHash = make([]byte, 0) @@ -377,14 +394,10 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageIsNotValidShouldNotAdd t.Parallel() marshalizer := &mock.MarshalizerMock{} + hasher := mock.HasherMock{} chanDone := make(chan struct{}, 1) testedNonce := uint64(67) multisigner := mock.NewMultiSigner() - chronologyValidator := &mock.ChronologyValidatorStub{ - ValidateReceivedBlockCalled: func(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return nil - }, - } metachainHeaders := &mock.CacherStub{} metachainHeadersNonces := &mock.Uint64SyncMapCacherStub{} headerValidator := &mock.HeaderValidatorStub{ @@ -392,21 +405,24 @@ func TestMetachainHeaderInterceptor_ProcessReceivedMessageIsNotValidShouldNotAdd return false }, } + + nodesCoordinator := &mock.NodesCoordinatorMock{} + mhi, _ := interceptors.NewMetachainHeaderInterceptor( marshalizer, metachainHeaders, metachainHeadersNonces, headerValidator, multisigner, - mock.HasherMock{}, + hasher, mock.NewOneShardCoordinatorMock(), - chronologyValidator, + nodesCoordinator, ) - hdr := block.NewInterceptedMetaHeader(multisigner, chronologyValidator) + hdr := block.NewInterceptedMetaHeader(multisigner, nodesCoordinator, marshalizer, hasher) hdr.Nonce = testedNonce hdr.PrevHash = make([]byte, 0) - hdr.PubKeysBitmap = make([]byte, 0) + hdr.PubKeysBitmap = []byte{1, 0, 0} hdr.Signature = make([]byte, 0) hdr.RootHash = make([]byte, 0) hdr.SetHash([]byte("aaa")) diff --git a/process/block/metablock.go b/process/block/metablock.go index 476d74945ad..7dbace5ea5c 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -10,75 +10,38 @@ import ( "github.com/ElrondNetwork/elrond-go/core/serviceContainer" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/data/state" - "github.com/ElrondNetwork/elrond-go/data/typeConverters" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/dataPool" - "github.com/ElrondNetwork/elrond-go/hashing" - 
"github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/throttle" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/statusHandler" - "github.com/ElrondNetwork/elrond-go/storage" ) // metaProcessor implements metaProcessor interface and actually it tries to execute block type metaProcessor struct { *baseProcessor - core serviceContainer.Core - dataPool dataRetriever.MetaPoolsHolder - - currHighestShardHdrsNonces map[uint32]uint64 - requestedShardHdrsHashes map[string]bool - allNeededShardHdrsFound bool - mutRequestedShardHdrsHashes sync.RWMutex - + core serviceContainer.Core + dataPool dataRetriever.MetaPoolsHolder shardsHeadersNonce *sync.Map - - nextKValidity uint32 - - chRcvAllHdrs chan bool - - headersCounter *headersCounter + shardBlockFinality uint32 + chRcvAllHdrs chan bool + headersCounter *headersCounter } // NewMetaProcessor creates a new metaProcessor object -func NewMetaProcessor( - core serviceContainer.Core, - accounts state.AccountsAdapter, - dataPool dataRetriever.MetaPoolsHolder, - forkDetector process.ForkDetector, - shardCoordinator sharding.Coordinator, - hasher hashing.Hasher, - marshalizer marshal.Marshalizer, - store dataRetriever.StorageService, - startHeaders map[uint32]data.HeaderHandler, - requestHandler process.RequestHandler, - uint64Converter typeConverters.Uint64ByteSliceConverter, -) (*metaProcessor, error) { - - err := checkProcessorNilParameters( - accounts, - forkDetector, - hasher, - marshalizer, - store, - shardCoordinator, - uint64Converter) +func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { + err := checkProcessorNilParameters(arguments.ArgBaseProcessor) if err != nil { return nil, err } - if dataPool == nil || dataPool.IsInterfaceNil() { + if arguments.DataPool == nil || arguments.DataPool.IsInterfaceNil() { return nil, process.ErrNilDataPoolHolder } - if dataPool.ShardHeaders() == nil || dataPool.ShardHeaders().IsInterfaceNil() { + if arguments.DataPool.ShardHeaders() == nil || arguments.DataPool.ShardHeaders().IsInterfaceNil() { return nil, process.ErrNilHeadersDataPool } - if requestHandler == nil || requestHandler.IsInterfaceNil() { - return nil, process.ErrNilRequestHandler - } blockSizeThrottler, err := throttle.NewBlockSizeThrottle() if err != nil { @@ -86,40 +49,42 @@ func NewMetaProcessor( } base := &baseProcessor{ - accounts: accounts, + accounts: arguments.Accounts, blockSizeThrottler: blockSizeThrottler, - forkDetector: forkDetector, - hasher: hasher, - marshalizer: marshalizer, - store: store, - shardCoordinator: shardCoordinator, - uint64Converter: uint64Converter, - onRequestHeaderHandler: requestHandler.RequestHeader, - onRequestHeaderHandlerByNonce: requestHandler.RequestHeaderByNonce, + forkDetector: arguments.ForkDetector, + hasher: arguments.Hasher, + marshalizer: arguments.Marshalizer, + store: arguments.Store, + shardCoordinator: arguments.ShardCoordinator, + nodesCoordinator: arguments.NodesCoordinator, + specialAddressHandler: arguments.SpecialAddressHandler, + uint64Converter: arguments.Uint64Converter, + onRequestHeaderHandler: arguments.RequestHandler.RequestHeader, + onRequestHeaderHandlerByNonce: arguments.RequestHandler.RequestHeaderByNonce, appStatusHandler: statusHandler.NewNilStatusHandler(), } - err = base.setLastNotarizedHeadersSlice(startHeaders) + err = base.setLastNotarizedHeadersSlice(arguments.StartHeaders) if err != nil { return nil, err } mp := metaProcessor{ - core: core, + 
core: arguments.Core, baseProcessor: base, - dataPool: dataPool, + dataPool: arguments.DataPool, headersCounter: NewHeaderCounter(), } - mp.requestedShardHdrsHashes = make(map[string]bool) + mp.hdrsForCurrBlock.hdrHashAndInfo = make(map[string]*hdrInfo) + mp.hdrsForCurrBlock.highestHdrNonce = make(map[uint32]uint64) headerPool := mp.dataPool.ShardHeaders() - headerPool.RegisterHandler(mp.receivedHeader) + headerPool.RegisterHandler(mp.receivedShardHeader) mp.chRcvAllHdrs = make(chan bool) - mp.nextKValidity = process.ShardBlockFinality - mp.allNeededShardHdrsFound = true + mp.shardBlockFinality = process.ShardBlockFinality mp.shardsHeadersNonce = &sync.Map{} @@ -140,6 +105,14 @@ func (mp *metaProcessor) ProcessBlock( err := mp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) if err != nil { + if err == process.ErrBlockHashDoesNotMatch { + log.Info(fmt.Sprintf("requested missing meta header with hash %s for shard %d\n", + core.ToB64(headerHandler.GetPrevHash()), + headerHandler.GetShardID())) + + go mp.onRequestHeaderHandler(headerHandler.GetShardID(), headerHandler.GetPrevHash()) + } + return err } @@ -160,22 +133,31 @@ func (mp *metaProcessor) ProcessBlock( mp.headersCounter.getNumShardMBHeadersTotalProcessed(), ) - requestedShardHdrs, requestedFinalShardHdrs := mp.requestShardHeaders(header) + mp.createBlockStarted() + requestedShardHdrs, requestedFinalityAttestingShardHdrs := mp.requestShardHeaders(header) if haveTime() < 0 { return process.ErrTimeIsOut } - if requestedShardHdrs > 0 || requestedFinalShardHdrs > 0 { - log.Info(fmt.Sprintf("requested %d missing shard headers and %d final shard headers\n", requestedShardHdrs, requestedFinalShardHdrs)) + haveMissingShardHeaders := requestedShardHdrs > 0 || requestedFinalityAttestingShardHdrs > 0 + if haveMissingShardHeaders { + log.Info(fmt.Sprintf("requested %d missing shard headers and %d finality attesting shard headers\n", + requestedShardHdrs, + requestedFinalityAttestingShardHdrs)) + err = mp.waitForBlockHeaders(haveTime()) - mp.mutRequestedShardHdrsHashes.Lock() - mp.allNeededShardHdrsFound = true - unreceivedShardHdrs := len(mp.requestedShardHdrsHashes) - mp.mutRequestedShardHdrsHashes.Unlock() + + mp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + missingShardHdrs := mp.hdrsForCurrBlock.missingHdrs + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + + mp.resetMissingHdrs() + if requestedShardHdrs > 0 { - log.Info(fmt.Sprintf("received %d missing shard headers\n", int(requestedShardHdrs)-unreceivedShardHdrs)) + log.Info(fmt.Sprintf("received %d missing shard headers\n", requestedShardHdrs-missingShardHdrs)) } + if err != nil { return err } @@ -189,12 +171,12 @@ func (mp *metaProcessor) ProcessBlock( go mp.checkAndRequestIfShardHeadersMissing(header.Round) }() - highestNonceHdrs, err := mp.checkShardHeadersValidity(header) + highestNonceHdrs, err := mp.checkShardHeadersValidity() if err != nil { return err } - err = mp.checkShardHeadersFinality(header, highestNonceHdrs) + err = mp.checkShardHeadersFinality(highestNonceHdrs) if err != nil { return err } @@ -211,13 +193,18 @@ func (mp *metaProcessor) ProcessBlock( } if !mp.verifyStateRoot(header.GetRootHash()) { - err = process.ErrRootStateMissmatch + err = process.ErrRootStateDoesNotMatch return err } return nil } +// SetConsensusData - sets the reward addresses for the current consensus group +func (mp *metaProcessor) SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) { + // nothing to do +} + func (mp *metaProcessor) 
checkAndRequestIfShardHeadersMissing(round uint64) { _, _, sortedHdrPerShard, err := mp.getOrderedHdrs(round) if err != nil { @@ -227,9 +214,9 @@ func (mp *metaProcessor) checkAndRequestIfShardHeadersMissing(round uint64) { for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { // map from *block.Header to dataHandler - sortedHdrs := make([]data.HeaderHandler, 0) + sortedHdrs := make([]data.HeaderHandler, len(sortedHdrPerShard[i])) for j := 0; j < len(sortedHdrPerShard[i]); j++ { - sortedHdrs = append(sortedHdrs, sortedHdrPerShard[i][j]) + sortedHdrs[j] = sortedHdrPerShard[i][j] } err := mp.requestHeadersIfMissing(sortedHdrs, i, round) @@ -242,18 +229,28 @@ func (mp *metaProcessor) checkAndRequestIfShardHeadersMissing(round uint64) { return } -func (mp *metaProcessor) indexBlock(metaBlock *block.MetaBlock, headerPool map[string]*block.Header) { +func (mp *metaProcessor) indexBlock( + metaBlock data.HeaderHandler, + lastMetaBlock data.HeaderHandler, +) { if mp.core == nil || mp.core.Indexer() == nil { return } - // Update tps benchmarks in the DB tpsBenchmark := mp.core.TPSBenchmark() if tpsBenchmark != nil { go mp.core.Indexer().UpdateTPS(tpsBenchmark) } - //TODO: maybe index metablocks also? + publicKeys, err := mp.nodesCoordinator.GetValidatorsPublicKeys(metaBlock.GetPrevRandSeed(), metaBlock.GetRound(), sharding.MetachainShardId) + if err != nil { + return + } + + signersIndexes := mp.nodesCoordinator.GetValidatorsIndexes(publicKeys) + go mp.core.Indexer().SaveMetaBlock(metaBlock, signersIndexes) + + saveRoundInfoInElastic(mp.core.Indexer(), mp.nodesCoordinator, sharding.MetachainShardId, metaBlock, lastMetaBlock, signersIndexes) } // removeBlockInfoFromPool removes the block info from associated pools @@ -272,22 +269,25 @@ func (mp *metaProcessor) removeBlockInfoFromPool(header *block.MetaBlock) error return process.ErrNilHeadersNoncesDataPool } + mp.hdrsForCurrBlock.mutHdrsForBlock.RLock() for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - - obj, ok := headerPool.Peek(shardData.HeaderHash) + shardHeaderHash := header.ShardInfo[i].HeaderHash + hdrInfo, ok := mp.hdrsForCurrBlock.hdrHashAndInfo[string(shardHeaderHash)] if !ok { - continue + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + return process.ErrMissingHeader } - hdr, ok := obj.(*block.Header) + shardBlock, ok := hdrInfo.hdr.(*block.Header) if !ok { + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() return process.ErrWrongTypeAssertion } - headerPool.Remove(shardData.HeaderHash) - headerNoncesPool.Remove(hdr.Nonce, hdr.ShardId) + headerPool.Remove([]byte(shardHeaderHash)) + headerNoncesPool.Remove(shardBlock.Nonce, shardBlock.ShardId) } + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() return nil } @@ -315,10 +315,9 @@ func (mp *metaProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler, return process.ErrNilHeadersNoncesDataPool } - hdrHashes := make([][]byte, 0) + hdrHashes := make([][]byte, len(header.ShardInfo)) for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - hdrHashes = append(hdrHashes, shardData.HeaderHash) + hdrHashes[i] = header.ShardInfo[i].HeaderHash } for _, hdrHash := range hdrHashes { @@ -358,15 +357,13 @@ func (mp *metaProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler, // CreateBlockBody creates block body of metachain func (mp *metaProcessor) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { log.Debug(fmt.Sprintf("started creating block body in round %d\n", round)) + 
mp.createBlockStarted() mp.blockSizeThrottler.ComputeMaxItems() return &block.MetaBlockBody{}, nil } func (mp *metaProcessor) processBlockHeaders(header *block.MetaBlock, round uint64, haveTime func() time.Duration) error { - hdrPool := mp.dataPool.ShardHeaders() - msg := "" - for i := 0; i < len(header.ShardInfo); i++ { shardData := header.ShardInfo[i] for j := 0; j < len(shardData.ShardMiniBlockHeaders); j++ { @@ -379,10 +376,10 @@ func (mp *metaProcessor) processBlockHeaders(header *block.MetaBlock, round uint err := mp.checkAndProcessShardMiniBlockHeader( headerHash, shardMiniBlockHeader, - hdrPool, round, shardData.ShardId, ) + if err != nil { return err } @@ -412,8 +409,6 @@ func (mp *metaProcessor) CommitBlock( } }() - tempHeaderPool := make(map[string]*block.Header) - err = checkForNils(chainHandler, headerHandler, bodyHandler) if err != nil { return err @@ -464,30 +459,38 @@ func (mp *metaProcessor) CommitBlock( return err } + mp.hdrsForCurrBlock.mutHdrsForBlock.RLock() for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) - if header == nil { - return err + shardHeaderHash := header.ShardInfo[i].HeaderHash + hdrInfo, ok := mp.hdrsForCurrBlock.hdrHashAndInfo[string(shardHeaderHash)] + if !ok { + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + return process.ErrMissingHeader } - mp.updateShardHeadersNonce(shardData.ShardId, header.Nonce) + shardBlock, ok := hdrInfo.hdr.(*block.Header) + if !ok { + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + return process.ErrWrongTypeAssertion + } - tempHeaderPool[string(shardData.HeaderHash)] = header + mp.updateShardHeadersNonce(shardBlock.ShardId, shardBlock.Nonce) - buff, err = mp.marshalizer.Marshal(header) + buff, err = mp.marshalizer.Marshal(shardBlock) if err != nil { + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() return err } - nonceToByteSlice := mp.uint64Converter.ToByteSlice(header.Nonce) - hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(header.ShardId) - errNotCritical = mp.store.Put(hdrNonceHashDataUnit, nonceToByteSlice, shardData.HeaderHash) + nonceToByteSlice := mp.uint64Converter.ToByteSlice(shardBlock.Nonce) + hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardBlock.ShardId) + errNotCritical = mp.store.Put(hdrNonceHashDataUnit, nonceToByteSlice, shardHeaderHash) log.LogIfError(errNotCritical) - errNotCritical = mp.store.Put(dataRetriever.BlockHeaderUnit, shardData.HeaderHash, buff) + errNotCritical = mp.store.Put(dataRetriever.BlockHeaderUnit, shardHeaderHash, buff) log.LogIfError(errNotCritical) } + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() mp.saveMetricCrossCheckBlockHeight() @@ -501,7 +504,7 @@ func (mp *metaProcessor) CommitBlock( return err } - log.Info(fmt.Sprintf("metaBlock with nonce %d and hash %s has been committed successfully\n", + log.Info(fmt.Sprintf("meta block with nonce %d and hash %s has been committed successfully\n", header.Nonce, core.ToB64(headerHash))) @@ -515,9 +518,15 @@ func (mp *metaProcessor) CommitBlock( log.Debug(errNotCritical.Error()) } - hdrsToAttestPreviousFinal := mp.nextKValidity + 1 + log.Info(fmt.Sprintf("meta block with nonce %d is the highest final block in shard %d\n", + mp.forkDetector.GetHighestFinalBlockNonce(), + mp.shardCoordinator.SelfId())) + + hdrsToAttestPreviousFinal := mp.shardBlockFinality + 1 mp.removeNotarizedHdrsBehindPreviousFinal(hdrsToAttestPreviousFinal) + 
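// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: the bookkeeping pattern behind
// mp.hdrsForCurrBlock that ProcessBlock/CommitBlock read above. The field
// names hdrHashAndInfo, highestHdrNonce and missingHdrs follow the diff; the
// hdrsForBlock type name, the tiny header struct and the main() driver are
// assumptions added only to keep the example self-contained and runnable.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"
	"sync"
)

type header struct {
	shardID uint32
	nonce   uint64
}

type hdrInfo struct {
	hdr         *header // nil while the header is still missing from the pool
	usedInBlock bool
}

type hdrsForBlock struct {
	mut                          sync.RWMutex
	hdrHashAndInfo               map[string]*hdrInfo
	highestHdrNonce              map[uint32]uint64
	missingHdrs                  uint32
	missingFinalityAttestingHdrs uint32
}

func newHdrsForBlock() *hdrsForBlock {
	return &hdrsForBlock{
		hdrHashAndInfo:  make(map[string]*hdrInfo),
		highestHdrNonce: make(map[uint32]uint64),
	}
}

// markMissing registers a header hash referenced by the meta block whose
// body is not yet in the pool, so it can be requested from the network.
func (h *hdrsForBlock) markMissing(hash string) {
	h.mut.Lock()
	h.hdrHashAndInfo[hash] = &hdrInfo{hdr: nil, usedInBlock: true}
	h.missingHdrs++
	h.mut.Unlock()
}

// receive fills a previously missing entry, tracks the highest nonce seen
// per shard and reports whether every requested header has now arrived.
func (h *hdrsForBlock) receive(hash string, hdr *header) bool {
	h.mut.Lock()
	defer h.mut.Unlock()

	info, ok := h.hdrHashAndInfo[hash]
	if !ok || info.hdr != nil {
		return h.missingHdrs == 0
	}

	info.hdr = hdr
	h.missingHdrs--
	if hdr.nonce > h.highestHdrNonce[hdr.shardID] {
		h.highestHdrNonce[hdr.shardID] = hdr.nonce
	}

	return h.missingHdrs == 0
}

func main() {
	hfb := newHdrsForBlock()
	hfb.markMissing("hash_0")
	hfb.markMissing("hash_1")

	fmt.Println(hfb.receive("hash_0", &header{shardID: 0, nonce: 7})) // false: one header still missing
	fmt.Println(hfb.receive("hash_1", &header{shardID: 1, nonce: 3})) // true: all requested headers arrived
}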
lastMetaBlock := chainHandler.GetCurrentBlockHeader() + err = chainHandler.SetCurrentBlockBody(body) if err != nil { return err @@ -534,7 +543,7 @@ func (mp *metaProcessor) CommitBlock( mp.core.TPSBenchmark().Update(header) } - mp.indexBlock(header, tempHeaderPool) + mp.indexBlock(header, lastMetaBlock) mp.appStatusHandler.SetStringValue(core.MetricCurrentBlockHash, core.ToB64(headerHash)) @@ -580,11 +589,7 @@ func (mp *metaProcessor) saveMetricCrossCheckBlockHeight() { continue } - if i > 0 { - crossCheckBlockHeight += ", " - } - - crossCheckBlockHeight += fmt.Sprintf("%d: %d", i, valueStored) + crossCheckBlockHeight += fmt.Sprintf("%d: %d, ", i, valueStored) } mp.appStatusHandler.SetStringValue(core.MetricCrossCheckBlockHeight, crossCheckBlockHeight) @@ -603,17 +608,26 @@ func (mp *metaProcessor) saveLastNotarizedHeader(header *block.MetaBlock) error tmpLastNotarizedHdrForShard[i] = mp.lastNotarizedHdrForShard(i) } + mp.hdrsForCurrBlock.mutHdrsForBlock.RLock() for i := 0; i < len(header.ShardInfo); i++ { - shardData := header.ShardInfo[i] - header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) - if err != nil { - return err + shardHeaderHash := header.ShardInfo[i].HeaderHash + hdrInfo, ok := mp.hdrsForCurrBlock.hdrHashAndInfo[string(shardHeaderHash)] + if !ok { + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + return process.ErrMissingHeader + } + + shardHdr, ok := hdrInfo.hdr.(*block.Header) + if !ok { + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + return process.ErrWrongTypeAssertion } - if tmpLastNotarizedHdrForShard[header.ShardId].GetNonce() < header.Nonce { - tmpLastNotarizedHdrForShard[header.ShardId] = header + if tmpLastNotarizedHdrForShard[shardHdr.ShardId].GetNonce() < shardHdr.Nonce { + tmpLastNotarizedHdrForShard[shardHdr.ShardId] = shardHdr } } + mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { mp.notarizedHdrs[i] = append(mp.notarizedHdrs[i], tmpLastNotarizedHdrForShard[i]) @@ -623,45 +637,9 @@ func (mp *metaProcessor) saveLastNotarizedHeader(header *block.MetaBlock) error return nil } -// gets all the headers from the metablock in sorted order per shard -func (mp *metaProcessor) getSortedShardHdrsFromMetablock(metaBlock *block.MetaBlock) (map[uint32][]*block.Header, error) { - sortedShardHdrs := make(map[uint32][]*block.Header, mp.shardCoordinator.NumberOfShards()) - - requestedHeaders := 0 - for i := 0; i < len(metaBlock.ShardInfo); i++ { - shardData := metaBlock.ShardInfo[i] - header, err := process.GetShardHeaderFromPool(shardData.HeaderHash, mp.dataPool.ShardHeaders()) - if err != nil { - log.Debug(err.Error()) - requestedHeaders++ - go mp.onRequestHeaderHandler(shardData.ShardId, shardData.HeaderHash) - continue - } - - sortedShardHdrs[shardData.ShardId] = append(sortedShardHdrs[shardData.ShardId], header) - } - - if requestedHeaders > 0 { - return nil, process.ErrMissingHeader - } - - for shId := uint32(0); shId < mp.shardCoordinator.NumberOfShards(); shId++ { - hdrsForShard := sortedShardHdrs[shId] - if len(hdrsForShard) <= 1 { - continue - } - - sort.Slice(hdrsForShard, func(i, j int) bool { - return hdrsForShard[i].GetNonce() < hdrsForShard[j].GetNonce() - }) - } - - return sortedShardHdrs, nil -} - // check if shard headers were signed and constructed correctly and returns headers which has to be // checked for finality -func (mp *metaProcessor) checkShardHeadersValidity(header *block.MetaBlock) (map[uint32]data.HeaderHandler, error) { +func (mp 
*metaProcessor) checkShardHeadersValidity() (map[uint32]data.HeaderHandler, error) { mp.mutNotarizedHdrs.RLock() if mp.notarizedHdrs == nil { mp.mutNotarizedHdrs.RUnlock() @@ -674,25 +652,22 @@ func (mp *metaProcessor) checkShardHeadersValidity(header *block.MetaBlock) (map } mp.mutNotarizedHdrs.RUnlock() - sortedShardHdrs, err := mp.getSortedShardHdrsFromMetablock(header) - if err != nil { - return nil, err - } + highestNonceHdrs := make(map[uint32]data.HeaderHandler) - highestNonceHdrs := make(map[uint32]data.HeaderHandler, mp.shardCoordinator.NumberOfShards()) - for shId := uint32(0); shId < mp.shardCoordinator.NumberOfShards(); shId++ { - hdrsForShard := sortedShardHdrs[shId] - if len(hdrsForShard) == 0 { - continue - } + usedShardHdrs := mp.sortHeadersForCurrentBlockByNonce(true) + if len(usedShardHdrs) == 0 { + return highestNonceHdrs, nil + } - for i := 0; i < len(hdrsForShard); i++ { - err := mp.isHdrConstructionValid(hdrsForShard[i], tmpLastNotarized[shId]) + for shardId, hdrsForShard := range usedShardHdrs { + for _, shardHdr := range hdrsForShard { + err := mp.isHdrConstructionValid(shardHdr, tmpLastNotarized[shardId]) if err != nil { return nil, err } - tmpLastNotarized[shId] = hdrsForShard[i] - highestNonceHdrs[shId] = hdrsForShard[i] + + tmpLastNotarized[shardId] = shardHdr + highestNonceHdrs[shardId] = shardHdr } } @@ -700,44 +675,38 @@ func (mp *metaProcessor) checkShardHeadersValidity(header *block.MetaBlock) (map } // check if shard headers are final by checking if newer headers were constructed upon them -func (mp *metaProcessor) checkShardHeadersFinality(header *block.MetaBlock, highestNonceHdrs map[uint32]data.HeaderHandler) error { - if header == nil { - return process.ErrNilBlockHeader - } - - //TODO: change this to look at the pool where values are saved by prevHash. 
can be done after resolver is done - _, _, sortedHdrPerShard, err := mp.getOrderedHdrs(header.GetRound()) - if err != nil { - return err - } +func (mp *metaProcessor) checkShardHeadersFinality(highestNonceHdrs map[uint32]data.HeaderHandler) error { + finalityAttestingShardHdrs := mp.sortHeadersForCurrentBlockByNonce(false) - for index, lastVerifiedHdr := range highestNonceHdrs { - if index != lastVerifiedHdr.GetShardID() { + for shardId, lastVerifiedHdr := range highestNonceHdrs { + if lastVerifiedHdr == nil || lastVerifiedHdr.IsInterfaceNil() { + return process.ErrNilBlockHeader + } + if lastVerifiedHdr.GetShardID() != shardId { return process.ErrShardIdMissmatch } // verify if there are "K" block after current to make this one final nextBlocksVerified := uint32(0) - shId := lastVerifiedHdr.GetShardID() - for i := 0; i < len(sortedHdrPerShard[shId]); i++ { - if nextBlocksVerified >= mp.nextKValidity { + for _, shardHdr := range finalityAttestingShardHdrs[shardId] { + if nextBlocksVerified >= mp.shardBlockFinality { break } // found a header with the next nonce - tmpHdr := sortedHdrPerShard[shId][i] - if tmpHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { - err := mp.isHdrConstructionValid(tmpHdr, lastVerifiedHdr) + if shardHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { + err := mp.isHdrConstructionValid(shardHdr, lastVerifiedHdr) if err != nil { + log.Debug(err.Error()) continue } - lastVerifiedHdr = tmpHdr + lastVerifiedHdr = shardHdr nextBlocksVerified += 1 } } - if nextBlocksVerified < mp.nextKValidity { + if nextBlocksVerified < mp.shardBlockFinality { go mp.onRequestHeaderHandlerByNonce(lastVerifiedHdr.GetShardID(), lastVerifiedHdr.GetNonce()+1) return process.ErrHeaderNotFinal } @@ -767,7 +736,7 @@ func (mp *metaProcessor) isShardHeaderValidFinal(currHdr *block.Header, lastHdr nextBlocksVerified := uint32(0) hdrIds := make([]uint32, 0) for i := 0; i < len(sortedShardHdrs); i++ { - if nextBlocksVerified >= mp.nextKValidity { + if nextBlocksVerified >= mp.shardBlockFinality { return true, hdrIds } @@ -785,95 +754,99 @@ func (mp *metaProcessor) isShardHeaderValidFinal(currHdr *block.Header, lastHdr } } - if nextBlocksVerified >= mp.nextKValidity { + if nextBlocksVerified >= mp.shardBlockFinality { return true, hdrIds } return false, nil } -// receivedHeader is a call back function which is called when a new header +// receivedShardHeader is a call back function which is called when a new header // is added in the headers pool -func (mp *metaProcessor) receivedHeader(headerHash []byte) { - shardHdrsCache := mp.dataPool.ShardHeaders() - if shardHdrsCache == nil { - return - } - - shardHdrsNoncesCache := mp.dataPool.HeadersNonces() - if shardHdrsNoncesCache == nil && mp.nextKValidity > 0 { +func (mp *metaProcessor) receivedShardHeader(shardHeaderHash []byte) { + shardHeaderPool := mp.dataPool.ShardHeaders() + if shardHeaderPool == nil { return } - obj, ok := shardHdrsCache.Peek(headerHash) + obj, ok := shardHeaderPool.Peek(shardHeaderHash) if !ok { return } - header, ok := obj.(data.HeaderHandler) + shardHeader, ok := obj.(*block.Header) if !ok { return } - log.Debug(fmt.Sprintf("received header with hash %s and nonce %d from network\n", - core.ToB64(headerHash), - header.GetNonce())) + log.Debug(fmt.Sprintf("received shard block with hash %s and nonce %d from network\n", + core.ToB64(shardHeaderHash), + shardHeader.Nonce)) - mp.mutRequestedShardHdrsHashes.Lock() + mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() - if !mp.allNeededShardHdrsFound { - if 
mp.requestedShardHdrsHashes[string(headerHash)] { - delete(mp.requestedShardHdrsHashes, string(headerHash)) + haveMissingShardHeaders := mp.hdrsForCurrBlock.missingHdrs > 0 || mp.hdrsForCurrBlock.missingFinalityAttestingHdrs > 0 + if haveMissingShardHeaders { + hdrInfoForHash := mp.hdrsForCurrBlock.hdrHashAndInfo[string(shardHeaderHash)] + receivedMissingShardHeader := hdrInfoForHash != nil && (hdrInfoForHash.hdr == nil || hdrInfoForHash.hdr.IsInterfaceNil()) + if receivedMissingShardHeader { + hdrInfoForHash.hdr = shardHeader + mp.hdrsForCurrBlock.missingHdrs-- - if header.GetNonce() > mp.currHighestShardHdrsNonces[header.GetShardID()] { - mp.currHighestShardHdrsNonces[header.GetShardID()] = header.GetNonce() + if shardHeader.Nonce > mp.hdrsForCurrBlock.highestHdrNonce[shardHeader.ShardId] { + mp.hdrsForCurrBlock.highestHdrNonce[shardHeader.ShardId] = shardHeader.Nonce } } - lenReqShardHdrsHashes := len(mp.requestedShardHdrsHashes) - areFinalAttestingHdrsInCache := false - if lenReqShardHdrsHashes == 0 { - requestedBlockHeaders := mp.requestFinalMissingHeaders() - if requestedBlockHeaders == 0 { - log.Info(fmt.Sprintf("received all final shard headers\n")) - areFinalAttestingHdrsInCache = true + if mp.hdrsForCurrBlock.missingHdrs == 0 { + missingFinalityAttestingShardHdrs := mp.hdrsForCurrBlock.missingFinalityAttestingHdrs + mp.hdrsForCurrBlock.missingFinalityAttestingHdrs = mp.requestMissingFinalityAttestingHeaders() + if mp.hdrsForCurrBlock.missingFinalityAttestingHdrs == 0 { + log.Info(fmt.Sprintf("received %d missing finality attesting shard headers\n", missingFinalityAttestingShardHdrs)) } else { - log.Info(fmt.Sprintf("requested %d missing final shard headers\n", requestedBlockHeaders)) + log.Info(fmt.Sprintf("requested %d missing finality attesting shard headers\n", mp.hdrsForCurrBlock.missingFinalityAttestingHdrs)) } } - mp.allNeededShardHdrsFound = lenReqShardHdrsHashes == 0 && areFinalAttestingHdrsInCache + missingShardHdrs := mp.hdrsForCurrBlock.missingHdrs + missingFinalityAttestingShardHdrs := mp.hdrsForCurrBlock.missingFinalityAttestingHdrs + mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - mp.mutRequestedShardHdrsHashes.Unlock() - - if lenReqShardHdrsHashes == 0 && areFinalAttestingHdrsInCache { + allMissingShardHeadersReceived := missingShardHdrs == 0 && missingFinalityAttestingShardHdrs == 0 + if allMissingShardHeadersReceived { mp.chRcvAllHdrs <- true } } else { - mp.mutRequestedShardHdrsHashes.Unlock() + mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() } } -// requestFinalMissingHeaders requests the headers needed to accept the current selected headers for processing the -// current block. It requests the nextKValidity headers greater than the highest shard header, for each shard, related +// requestMissingFinalityAttestingHeaders requests the headers needed to accept the current selected headers for processing the +// current block. 
It requests the shardBlockFinality headers greater than the highest shard header, for each shard, related // to the block which should be processed -func (mp *metaProcessor) requestFinalMissingHeaders() uint32 { +func (mp *metaProcessor) requestMissingFinalityAttestingHeaders() uint32 { requestedBlockHeaders := uint32(0) for shardId := uint32(0); shardId < mp.shardCoordinator.NumberOfShards(); shardId++ { - for i := mp.currHighestShardHdrsNonces[shardId] + 1; i <= mp.currHighestShardHdrsNonces[shardId]+uint64(mp.nextKValidity); i++ { - if mp.currHighestShardHdrsNonces[shardId] == uint64(0) { - continue - } + highestHdrNonce := mp.hdrsForCurrBlock.highestHdrNonce[shardId] + if highestHdrNonce == uint64(0) { + continue + } - _, _, err := process.GetShardHeaderFromPoolWithNonce( + lastFinalityAttestingHeader := mp.hdrsForCurrBlock.highestHdrNonce[shardId] + uint64(mp.shardBlockFinality) + for i := highestHdrNonce + 1; i <= lastFinalityAttestingHeader; i++ { + shardHeader, shardHeaderHash, err := process.GetShardHeaderFromPoolWithNonce( i, shardId, mp.dataPool.ShardHeaders(), mp.dataPool.HeadersNonces()) + if err != nil { requestedBlockHeaders++ go mp.onRequestHeaderHandlerByNonce(shardId, i) + continue } + + mp.hdrsForCurrBlock.hdrHashAndInfo[string(shardHeaderHash)] = &hdrInfo{hdr: shardHeader, usedInBlock: false} } } @@ -883,78 +856,64 @@ func (mp *metaProcessor) requestFinalMissingHeaders() uint32 { func (mp *metaProcessor) requestShardHeaders(metaBlock *block.MetaBlock) (uint32, uint32) { _ = process.EmptyChannel(mp.chRcvAllHdrs) - mp.mutRequestedShardHdrsHashes.Lock() - - mp.requestedShardHdrsHashes = make(map[string]bool) - mp.allNeededShardHdrsFound = true - if len(metaBlock.ShardInfo) == 0 { - mp.mutRequestedShardHdrsHashes.Unlock() return 0, 0 } - missingHeaderHashes := mp.computeMissingHeaders(metaBlock) + missingHeaderHashes := mp.computeMissingAndExistingShardHeaders(metaBlock) - requestedBlockHeaders := uint32(0) - for shardId, headerHashes := range missingHeaderHashes { - for _, headerHash := range headerHashes { - requestedBlockHeaders++ - mp.requestedShardHdrsHashes[string(headerHash)] = true - go mp.onRequestHeaderHandler(shardId, headerHash) + mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + for shardId, shardHeaderHashes := range missingHeaderHashes { + for _, hash := range shardHeaderHashes { + mp.hdrsForCurrBlock.hdrHashAndInfo[string(hash)] = &hdrInfo{hdr: nil, usedInBlock: true} + go mp.onRequestHeaderHandler(shardId, hash) } } - requestedFinalBlockHeaders := uint32(0) - if requestedBlockHeaders > 0 { - mp.allNeededShardHdrsFound = false - } else { - requestedFinalBlockHeaders = mp.requestFinalMissingHeaders() - if requestedFinalBlockHeaders > 0 { - mp.allNeededShardHdrsFound = false - } + if mp.hdrsForCurrBlock.missingHdrs == 0 { + mp.hdrsForCurrBlock.missingFinalityAttestingHdrs = mp.requestMissingFinalityAttestingHeaders() } - mp.mutRequestedShardHdrsHashes.Unlock() + requestedHdrs := mp.hdrsForCurrBlock.missingHdrs + requestedFinalityAttestingHdrs := mp.hdrsForCurrBlock.missingFinalityAttestingHdrs + mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - return requestedBlockHeaders, requestedFinalBlockHeaders + return requestedHdrs, requestedFinalityAttestingHdrs } -func (mp *metaProcessor) computeMissingHeaders(metaBlock *block.MetaBlock) map[uint32][][]byte { - missingHeaders := make(map[uint32][][]byte) - mp.currHighestShardHdrsNonces = make(map[uint32]uint64, mp.shardCoordinator.NumberOfShards()) - for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { - 
mp.currHighestShardHdrsNonces[i] = uint64(0) - } +func (mp *metaProcessor) computeMissingAndExistingShardHeaders(metaBlock *block.MetaBlock) map[uint32][][]byte { + missingHeadersHashes := make(map[uint32][][]byte) + mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() for i := 0; i < len(metaBlock.ShardInfo); i++ { shardData := metaBlock.ShardInfo[i] hdr, err := process.GetShardHeaderFromPool( shardData.HeaderHash, mp.dataPool.ShardHeaders()) + if err != nil { - missingHeaders[shardData.ShardId] = append(missingHeaders[shardData.ShardId], shardData.HeaderHash) + missingHeadersHashes[shardData.ShardId] = append(missingHeadersHashes[shardData.ShardId], shardData.HeaderHash) + mp.hdrsForCurrBlock.missingHdrs++ continue } - if hdr.Nonce > mp.currHighestShardHdrsNonces[shardData.ShardId] { - mp.currHighestShardHdrsNonces[shardData.ShardId] = hdr.Nonce + mp.hdrsForCurrBlock.hdrHashAndInfo[string(shardData.HeaderHash)] = &hdrInfo{hdr: hdr, usedInBlock: true} + + if hdr.Nonce > mp.hdrsForCurrBlock.highestHdrNonce[shardData.ShardId] { + mp.hdrsForCurrBlock.highestHdrNonce[shardData.ShardId] = hdr.Nonce } } + mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - return missingHeaders + return missingHeadersHashes } func (mp *metaProcessor) checkAndProcessShardMiniBlockHeader( headerHash []byte, shardMiniBlockHeader *block.ShardMiniBlockHeader, - hdrPool storage.Cacher, round uint64, shardId uint32, ) error { - - if hdrPool == nil || hdrPool.IsInterfaceNil() { - return process.ErrNilHeadersDataPool - } // TODO: real processing has to be done here, using metachain state return nil } @@ -977,11 +936,6 @@ func (mp *metaProcessor) createShardInfo( return shardInfo, nil } - hdrPool := mp.dataPool.ShardHeaders() - if hdrPool == nil { - return nil, process.ErrNilHeadersDataPool - } - mbHdrs := uint32(0) timeBefore := time.Now() @@ -1001,7 +955,7 @@ func (mp *metaProcessor) createShardInfo( log.Info(fmt.Sprintf("creating shard info has been started: have %d hdrs in pool\n", len(orderedHdrs))) - // save last committed hdr for verification + // save last committed header for verification mp.mutNotarizedHdrs.RLock() if mp.notarizedHdrs == nil { mp.mutNotarizedHdrs.RUnlock() @@ -1012,6 +966,7 @@ func (mp *metaProcessor) createShardInfo( } mp.mutNotarizedHdrs.RUnlock() + mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() for index := range orderedHdrs { shId := orderedHdrs[index].ShardId @@ -1050,7 +1005,6 @@ func (mp *metaProcessor) createShardInfo( err := mp.checkAndProcessShardMiniBlockHeader( orderedHdrHashes[index], &shardMiniBlockHeader, - hdrPool, round, shardData.ShardId, ) @@ -1075,9 +1029,11 @@ func (mp *metaProcessor) createShardInfo( if len(shardData.ShardMiniBlockHeaders) == len(orderedHdrs[index].MiniBlockHeaders) { shardInfo = append(shardInfo, shardData) + mp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedHdrHashes[index])] = &hdrInfo{hdr: orderedHdrs[index], usedInBlock: true} } log.Info(fmt.Sprintf("creating shard info has been finished: created %d shard data\n", len(shardInfo))) + mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() return shardInfo, nil } } @@ -1087,16 +1043,20 @@ func (mp *metaProcessor) createShardInfo( if len(shardData.ShardMiniBlockHeaders) == len(orderedHdrs[index].MiniBlockHeaders) { shardInfo = append(shardInfo, shardData) + mp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedHdrHashes[index])] = &hdrInfo{hdr: orderedHdrs[index], usedInBlock: true} } log.Info(fmt.Sprintf("creating shard info has been finished: created %d shard data\n", len(shardInfo))) + mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() return 
shardInfo, nil } if len(shardData.ShardMiniBlockHeaders) == len(orderedHdrs[index].MiniBlockHeaders) { shardInfo = append(shardInfo, shardData) + mp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedHdrHashes[index])] = &hdrInfo{hdr: orderedHdrs[index], usedInBlock: true} } } + mp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() log.Info(fmt.Sprintf("creating shard info has been finished: created %d shard data\n", len(shardInfo))) return shardInfo, nil @@ -1140,7 +1100,7 @@ func (mp *metaProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round u mp.blockSizeThrottler.Add( round, - core.Max(header.ItemsInBody(), header.ItemsInHeader())) + core.MaxUint32(header.ItemsInBody(), header.ItemsInHeader())) return header, nil } @@ -1169,12 +1129,12 @@ func (mp *metaProcessor) MarshalizedDataToBroadcast( } func (mp *metaProcessor) getOrderedHdrs(round uint64) ([]*block.Header, [][]byte, map[uint32][]*block.Header, error) { - hdrStore := mp.dataPool.ShardHeaders() - if hdrStore == nil { - return nil, nil, nil, process.ErrNilCacher + shardBlocksPool := mp.dataPool.ShardHeaders() + if shardBlocksPool == nil { + return nil, nil, nil, process.ErrNilShardBlockPool } - hashAndBlockMap := make(map[uint32][]*hashAndHdr, mp.shardCoordinator.NumberOfShards()) + hashAndBlockMap := make(map[uint32][]*hashAndHdr) headersMap := make(map[uint32][]*block.Header) headers := make([]*block.Header, 0) hdrHashes := make([][]byte, 0) @@ -1186,8 +1146,8 @@ func (mp *metaProcessor) getOrderedHdrs(round uint64) ([]*block.Header, [][]byte } // get keys and arrange them into shards - for _, key := range hdrStore.Keys() { - val, _ := hdrStore.Peek(key) + for _, key := range shardBlocksPool.Keys() { + val, _ := shardBlocksPool.Peek(key) if val == nil { continue } diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index 741ec1aab42..0e01d4a01fe 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -20,6 +20,29 @@ import ( "github.com/stretchr/testify/assert" ) +func createMockMetaArguments() blproc.ArgMetaProcessor { + mdp := initMetaDataPool() + shardCoordinator := mock.NewOneShardCoordinatorMock() + arguments := blproc.ArgMetaProcessor{ + ArgBaseProcessor: blproc.ArgBaseProcessor{ + Accounts: &mock.AccountsStub{}, + ForkDetector: &mock.ForkDetectorMock{}, + Hasher: &mock.HasherStub{}, + Marshalizer: &mock.MarshalizerMock{}, + Store: &mock.ChainStorerMock{}, + ShardCoordinator: shardCoordinator, + NodesCoordinator: mock.NewNodesCoordinatorMock(), + SpecialAddressHandler: &mock.SpecialAddressHandlerMock{}, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + StartHeaders: createGenesisBlocks(shardCoordinator), + RequestHandler: &mock.RequestHandlerMock{}, + Core: &mock.ServiceContainerMock{}, + }, + DataPool: mdp, + } + return arguments +} + func createMetaBlockHeader() *block.MetaBlock { hdr := block.MetaBlock{ Nonce: 1, @@ -124,20 +147,10 @@ func setLastNotarizedHdr( func TestNewMetaProcessor_NilAccountsAdapterShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - be, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - nil, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.Accounts = nil + + be, err := blproc.NewMetaProcessor(arguments) assert.Equal(t, 
process.ErrNilAccountsAdapter, err) assert.Nil(t, be) } @@ -145,19 +158,10 @@ func TestNewMetaProcessor_NilAccountsAdapterShouldErr(t *testing.T) { func TestNewMetaProcessor_NilDataPoolShouldErr(t *testing.T) { t.Parallel() - be, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - nil, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.DataPool = nil + + be, err := blproc.NewMetaProcessor(arguments) assert.Equal(t, process.ErrNilDataPoolHolder, err) assert.Nil(t, be) } @@ -165,20 +169,10 @@ func TestNewMetaProcessor_NilDataPoolShouldErr(t *testing.T) { func TestNewMetaProcessor_NilForkDetectorShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - be, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - nil, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.ForkDetector = nil + + be, err := blproc.NewMetaProcessor(arguments) assert.Equal(t, process.ErrNilForkDetector, err) assert.Nil(t, be) } @@ -186,20 +180,10 @@ func TestNewMetaProcessor_NilForkDetectorShouldErr(t *testing.T) { func TestNewMetaProcessor_NilShardCoordinatorShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - be, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - nil, - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.ShardCoordinator = nil + + be, err := blproc.NewMetaProcessor(arguments) assert.Equal(t, process.ErrNilShardCoordinator, err) assert.Nil(t, be) } @@ -207,20 +191,10 @@ func TestNewMetaProcessor_NilShardCoordinatorShouldErr(t *testing.T) { func TestNewMetaProcessor_NilHasherShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - be, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - nil, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.Hasher = nil + + be, err := blproc.NewMetaProcessor(arguments) assert.Equal(t, process.ErrNilHasher, err) assert.Nil(t, be) } @@ -228,20 +202,10 @@ func TestNewMetaProcessor_NilHasherShouldErr(t *testing.T) { func TestNewMetaProcessor_NilMarshalizerShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - be, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - nil, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + 
arguments := createMockMetaArguments() + arguments.Marshalizer = nil + + be, err := blproc.NewMetaProcessor(arguments) assert.Equal(t, process.ErrNilMarshalizer, err) assert.Nil(t, be) } @@ -249,20 +213,10 @@ func TestNewMetaProcessor_NilMarshalizerShouldErr(t *testing.T) { func TestNewMetaProcessor_NilChainStorerShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - be, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - nil, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.Store = nil + + be, err := blproc.NewMetaProcessor(arguments) assert.Equal(t, process.ErrNilStorage, err) assert.Nil(t, be) } @@ -270,20 +224,10 @@ func TestNewMetaProcessor_NilChainStorerShouldErr(t *testing.T) { func TestNewMetaProcessor_NilRequestHeaderHandlerShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - be, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - nil, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.RequestHandler = nil + + be, err := blproc.NewMetaProcessor(arguments) assert.Equal(t, process.ErrNilRequestHandler, err) assert.Nil(t, be) } @@ -291,9 +235,9 @@ func TestNewMetaProcessor_NilRequestHeaderHandlerShouldErr(t *testing.T) { func TestNewMetaProcessor_OkValsShouldWork(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - genesisBlocks := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - mp, err := blproc.NewMetaProcessorBasicSingleShard(mdp, genesisBlocks) + arguments := createMockMetaArguments() + + mp, err := blproc.NewMetaProcessor(arguments) assert.Nil(t, err) assert.NotNil(t, mp) } @@ -303,9 +247,8 @@ func TestNewMetaProcessor_OkValsShouldWork(t *testing.T) { func TestMetaProcessor_ProcessBlockWithNilBlockchainShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - genesisBlocks := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - mp, _ := blproc.NewMetaProcessorBasicSingleShard(mdp, genesisBlocks) + arguments := createMockMetaArguments() + mp, _ := blproc.NewMetaProcessor(arguments) blk := &block.MetaBlockBody{} err := mp.ProcessBlock(nil, &block.MetaBlock{}, blk, haveTime) @@ -315,9 +258,9 @@ func TestMetaProcessor_ProcessBlockWithNilBlockchainShouldErr(t *testing.T) { func TestMetaProcessor_ProcessBlockWithNilHeaderShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - genesisBlocks := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - mp, _ := blproc.NewMetaProcessorBasicSingleShard(mdp, genesisBlocks) + arguments := createMockMetaArguments() + + mp, _ := blproc.NewMetaProcessor(arguments) blk := &block.MetaBlockBody{} err := mp.ProcessBlock(&blockchain.MetaChain{}, nil, blk, haveTime) @@ -327,9 +270,8 @@ func TestMetaProcessor_ProcessBlockWithNilHeaderShouldErr(t *testing.T) { func TestMetaProcessor_ProcessBlockWithNilBlockBodyShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - genesisBlocks := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - mp, _ := blproc.NewMetaProcessorBasicSingleShard(mdp, 
genesisBlocks) + arguments := createMockMetaArguments() + mp, _ := blproc.NewMetaProcessor(arguments) err := mp.ProcessBlock(&blockchain.MetaChain{}, &block.MetaBlock{}, nil, haveTime) assert.Equal(t, process.ErrNilBlockBody, err) @@ -338,9 +280,8 @@ func TestMetaProcessor_ProcessBlockWithNilBlockBodyShouldErr(t *testing.T) { func TestMetaProcessor_ProcessBlockWithNilHaveTimeFuncShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - genesisBlocks := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - mp, _ := blproc.NewMetaProcessorBasicSingleShard(mdp, genesisBlocks) + arguments := createMockMetaArguments() + mp, _ := blproc.NewMetaProcessor(arguments) blk := &block.MetaBlockBody{} err := mp.ProcessBlock(&blockchain.MetaChain{}, &block.MetaBlock{}, blk, nil) @@ -350,7 +291,6 @@ func TestMetaProcessor_ProcessBlockWithNilHaveTimeFuncShouldErr(t *testing.T) { func TestMetaProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() // set accounts dirty journalLen := func() int { return 3 } revToSnapshot := func(snapshot int) error { return nil } @@ -363,22 +303,13 @@ func TestMetaProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { RootHash: []byte("roothash"), } body := &block.MetaBlockBody{} - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revToSnapshot, - }, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revToSnapshot, + } + mp, _ := blproc.NewMetaProcessor(arguments) + // should return err err := mp.ProcessBlock(blkc, &hdr, body, haveTime) assert.NotNil(t, err) @@ -388,9 +319,8 @@ func TestMetaProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { func TestMetaProcessor_ProcessWithHeaderNotFirstShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - genesisBlocks := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - mp, _ := blproc.NewMetaProcessorBasicSingleShard(mdp, genesisBlocks) + arguments := createMockMetaArguments() + mp, _ := blproc.NewMetaProcessor(arguments) blkc := &blockchain.MetaChain{} hdr := &block.MetaBlock{ @@ -404,9 +334,8 @@ func TestMetaProcessor_ProcessWithHeaderNotFirstShouldErr(t *testing.T) { func TestMetaProcessor_ProcessWithHeaderNotCorrectNonceShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - genesisBlocks := createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - mp, _ := blproc.NewMetaProcessorBasicSingleShard(mdp, genesisBlocks) + arguments := createMockMetaArguments() + mp, _ := blproc.NewMetaProcessor(arguments) blkc := &blockchain.MetaChain{ CurrentBlock: &block.MetaBlock{ Round: 1, @@ -418,6 +347,7 @@ func TestMetaProcessor_ProcessWithHeaderNotCorrectNonceShouldErr(t *testing.T) { Nonce: 3, } body := &block.MetaBlockBody{} + err := mp.ProcessBlock(blkc, hdr, body, haveTime) assert.Equal(t, process.ErrWrongNonceInBlock, err) } @@ -425,9 +355,8 @@ func TestMetaProcessor_ProcessWithHeaderNotCorrectNonceShouldErr(t *testing.T) { func TestMetaProcessor_ProcessWithHeaderNotCorrectPrevHashShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - genesisBlocks := 
createGenesisBlocks(mock.NewOneShardCoordinatorMock()) - mp, _ := blproc.NewMetaProcessorBasicSingleShard(mdp, genesisBlocks) + arguments := createMockMetaArguments() + mp, _ := blproc.NewMetaProcessor(arguments) blkc := &blockchain.MetaChain{ CurrentBlock: &block.MetaBlock{ Round: 1, @@ -441,6 +370,7 @@ func TestMetaProcessor_ProcessWithHeaderNotCorrectPrevHashShouldErr(t *testing.T } body := &block.MetaBlockBody{} + err := mp.ProcessBlock(blkc, hdr, body, haveTime) assert.Equal(t, process.ErrBlockHashDoesNotMatch, err) } @@ -448,7 +378,6 @@ func TestMetaProcessor_ProcessWithHeaderNotCorrectPrevHashShouldErr(t *testing.T func TestMetaProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() blkc := &blockchain.MetaChain{ CurrentBlock: &block.MetaBlock{ Nonce: 0, @@ -466,34 +395,24 @@ func TestMetaProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState rootHashCalled := func() ([]byte, error) { return []byte("rootHashX"), nil } - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - JournalLenCalled: journalLen, - RevertToSnapshotCalled: revertToSnapshot, - RootHashCalled: rootHashCalled, - }, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + JournalLenCalled: journalLen, + RevertToSnapshotCalled: revertToSnapshot, + RootHashCalled: rootHashCalled, + } + mp, _ := blproc.NewMetaProcessor(arguments) go func() { mp.ChRcvAllHdrs() <- true }() // should return err - mp.SetNextKValidity(0) + mp.SetShardBlockFinality(0) hdr.ShardInfo = make([]block.ShardData, 0) err := mp.ProcessBlock(blkc, hdr, body, haveTime) - assert.Equal(t, process.ErrRootStateMissmatch, err) + assert.Equal(t, process.ErrRootStateDoesNotMatch, err) assert.True(t, wasCalled) } @@ -502,24 +421,13 @@ func TestMetaProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState func TestMetaProcessor_ProcessBlockHeaderShouldPass(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - accounts := &mock.AccountsStub{} - accounts.RevertToSnapshotCalled = func(snapshot int) error { - return nil + arguments := createMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + return nil + }, } - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accounts, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + mp, _ := blproc.NewMetaProcessor(arguments) txHash := []byte("txhash") txHashes := make([][]byte, 0) @@ -561,19 +469,11 @@ func TestMetaProcessor_RequestFinalMissingHeaderShouldPass(t *testing.T) { accounts.RevertToSnapshotCalled = func(snapshot int) error { return nil } - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accounts, - mdp, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(3), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)), - &mock.RequestHandlerMock{}, - 
&mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(3) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) + arguments.DataPool = mdp + mp, _ := blproc.NewMetaProcessor(arguments) mdp.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { cs := &mock.Uint64SyncMapCacherStub{} cs.GetCalled = func(key uint64) (dataRetriever.ShardIdHashMap, bool) { @@ -584,11 +484,11 @@ func TestMetaProcessor_RequestFinalMissingHeaderShouldPass(t *testing.T) { } return cs } - mp.AddHdrHashToRequestedList([]byte("header_hash")) - mp.SetCurrHighestShardHdrsNonces(0, 1) - mp.SetCurrHighestShardHdrsNonces(1, 2) - mp.SetCurrHighestShardHdrsNonces(2, 3) - res := mp.RequestFinalMissingHeaders() + mp.AddHdrHashToRequestedList(&block.Header{}, []byte("header_hash")) + mp.SetHighestHdrNonceForCurrentBlock(0, 1) + mp.SetHighestHdrNonceForCurrentBlock(1, 2) + mp.SetHighestHdrNonceForCurrentBlock(2, 3) + res := mp.RequestMissingFinalityAttestingHeaders() assert.Equal(t, res, uint32(3)) } @@ -597,24 +497,13 @@ func TestMetaProcessor_RequestFinalMissingHeaderShouldPass(t *testing.T) { func TestMetaProcessor_CommitBlockNilBlockchainShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - accounts := &mock.AccountsStub{} - accounts.RevertToSnapshotCalled = func(snapshot int) error { - return nil + arguments := createMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + return nil + }, } - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accounts, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + mp, _ := blproc.NewMetaProcessor(arguments) blk := &block.MetaBlockBody{} err := mp.CommitBlock(nil, &block.MetaBlock{}, blk) assert.Equal(t, process.ErrNilBlockChain, err) @@ -623,7 +512,6 @@ func TestMetaProcessor_CommitBlockNilBlockchainShouldErr(t *testing.T) { func TestMetaProcessor_CommitBlockMarshalizerFailForHeaderShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() accounts := &mock.AccountsStub{ RevertToSnapshotCalled: func(snapshot int) error { return nil @@ -641,19 +529,10 @@ func TestMetaProcessor_CommitBlockMarshalizerFailForHeaderShouldErr(t *testing.T return []byte("obj"), nil }, } - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accounts, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - marshalizer, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.Accounts = accounts + arguments.Marshalizer = marshalizer + mp, _ := blproc.NewMetaProcessor(arguments) blkc := createTestBlockchain() err := mp.CommitBlock(blkc, hdr, body) assert.Equal(t, errMarshalizer, err) @@ -662,7 +541,6 @@ func TestMetaProcessor_CommitBlockMarshalizerFailForHeaderShouldErr(t *testing.T func TestMetaProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() wasCalled := false errPersister := errors.New("failure") accounts := &mock.AccountsStub{ @@ -681,27 +559,23 @@ func 
TestMetaProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) { store := initStore() store.AddStorer(dataRetriever.MetaBlockUnit, hdrUnit) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accounts, - mdp, - &mock.ForkDetectorMock{ - AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte) error { - return nil - }, + arguments := createMockMetaArguments() + arguments.Accounts = accounts + arguments.Store = store + arguments.ForkDetector = &mock.ForkDetectorMock{ + AddHeaderCalled: func(header data.HeaderHandler, hash []byte, state process.BlockHeaderState, finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte) error { + return nil }, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - store, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, + } + mp, _ := blproc.NewMetaProcessor(arguments) blkc, _ := blockchain.NewMetaChain( generateTestCache(), ) + mp.SetHdrForCurrentBlock([]byte("hdr_hash1"), &block.Header{}, true) err := mp.CommitBlock(blkc, hdr, body) assert.True(t, wasCalled) assert.Nil(t, err) @@ -720,19 +594,11 @@ func TestMetaProcessor_CommitBlockNilNoncesDataPoolShouldErr(t *testing.T) { body := &block.MetaBlockBody{} store := initStore() - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accounts, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - store, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.Accounts = accounts + arguments.Store = store + arguments.DataPool = mdp + mp, _ := blproc.NewMetaProcessor(arguments) mdp.HeadersNoncesCalled = func() dataRetriever.Uint64SyncMapCacher { return nil @@ -758,19 +624,13 @@ func TestMetaProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { hasher := &mock.HasherStub{} store := initStore() - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accounts, - mdp, - fd, - mock.NewOneShardCoordinatorMock(), - hasher, - &mock.MarshalizerMock{}, - store, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.DataPool = mdp + arguments.Accounts = accounts + arguments.ForkDetector = fd + arguments.Store = store + arguments.Hasher = hasher + mp, _ := blproc.NewMetaProcessor(arguments) mdp.ShardHeadersCalled = func() storage.Cacher { return &mock.CacherStub{ @@ -810,6 +670,9 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { return errors.New("should have not got here") }, + GetHighestFinalBlockNonceCalled: func() uint64 { + return 0 + }, } hasher := &mock.HasherStub{} blockHeaderUnit := &mock.StorerStub{ @@ -820,19 +683,13 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { store := initStore() store.AddStorer(dataRetriever.BlockHeaderUnit, blockHeaderUnit) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - accounts, - mdp, - fd, - mock.NewOneShardCoordinatorMock(), - hasher, - &mock.MarshalizerMock{}, - store, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - 
&mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.DataPool = mdp + arguments.Accounts = accounts + arguments.ForkDetector = fd + arguments.Store = store + arguments.Hasher = hasher + mp, _ := blproc.NewMetaProcessor(arguments) removeHdrWasCalled := false mdp.ShardHeadersCalled = func() storage.Cacher { @@ -855,6 +712,7 @@ func TestMetaProcessor_CommitBlockOkValsShouldWork(t *testing.T) { blkc := createTestBlockchain() + mp.SetHdrForCurrentBlock([]byte("hdr_hash1"), &block.Header{}, true) err := mp.CommitBlock(blkc, hdr, body) assert.Nil(t, err) assert.True(t, removeHdrWasCalled) @@ -867,19 +725,12 @@ func TestBlockProc_RequestTransactionFromNetwork(t *testing.T) { t.Parallel() mdp := initMetaDataPool() - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + + arguments := createMockMetaArguments() + arguments.DataPool = mdp + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) + mdp.ShardHeadersCalled = func() storage.Cacher { cs := &mock.CacherStub{} cs.RegisterHandlerCalled = func(i func(key []byte)) { @@ -903,20 +754,11 @@ func TestBlockProc_RequestTransactionFromNetwork(t *testing.T) { func TestMetaProcessor_RemoveBlockInfoFromPoolShouldErrNilMetaBlockHeader(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initStore(), - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.DataPool = initMetaDataPool() + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) + err := mp.RemoveBlockInfoFromPool(nil) assert.NotNil(t, err) assert.Equal(t, err, process.ErrNilMetaBlockHeader) @@ -925,21 +767,13 @@ func TestMetaProcessor_RemoveBlockInfoFromPoolShouldErrNilMetaBlockHeader(t *tes func TestMetaProcessor_RemoveBlockInfoFromPoolShouldWork(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initStore(), - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.DataPool = initMetaDataPool() + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) + header := createMetaBlockHeader() + mp.SetHdrForCurrentBlock([]byte("hdr_hash1"), &block.Header{}, true) err := mp.RemoveBlockInfoFromPool(header) assert.Nil(t, err) } @@ -947,24 +781,17 @@ func TestMetaProcessor_RemoveBlockInfoFromPoolShouldWork(t *testing.T) { func TestMetaProcessor_CreateBlockHeaderShouldNotReturnNilWhenCreateShardInfoFail(t *testing.T) { t.Parallel() - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - JournalLenCalled: func() int { - return 1 - }, + arguments := createMockMetaArguments() 
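// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: the constructor refactor the
// tests above rely on. Instead of a long positional parameter list,
// NewMetaProcessor now takes a single arguments struct, so each test builds
// the happy-path arguments once and overrides only the field under test.
// The names argWidget, newWidget and createMockArguments below are
// hypothetical; only the shape mirrors ArgMetaProcessor and
// createMockMetaArguments from the diff.
// ---------------------------------------------------------------------------
package main

import (
	"errors"
	"fmt"
)

type accountsAdapter interface{ JournalLen() int }

type accountsStub struct{ journalLen int }

func (a *accountsStub) JournalLen() int { return a.journalLen }

// argWidget plays the role of ArgMetaProcessor: one struct carrying every
// dependency, validated inside the constructor.
type argWidget struct {
	Accounts accountsAdapter
	Store    map[string][]byte
}

type widget struct{ arg argWidget }

var errNilAccounts = errors.New("nil accounts adapter")
var errNilStore = errors.New("nil store")

func newWidget(arg argWidget) (*widget, error) {
	if arg.Accounts == nil {
		return nil, errNilAccounts
	}
	if arg.Store == nil {
		return nil, errNilStore
	}
	return &widget{arg: arg}, nil
}

// createMockArguments mirrors createMockMetaArguments: defaults that satisfy
// every nil-check, ready to be selectively overridden by a test.
func createMockArguments() argWidget {
	return argWidget{
		Accounts: &accountsStub{},
		Store:    make(map[string][]byte),
	}
}

func main() {
	// "Nil accounts should err" test shape: take the defaults, break one field.
	arg := createMockArguments()
	arg.Accounts = nil
	_, err := newWidget(arg)
	fmt.Println(err) // nil accounts adapter

	// Happy path: the defaults pass validation untouched.
	_, err = newWidget(createMockArguments())
	fmt.Println(err) // <nil>
}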
+ arguments.Accounts = &mock.AccountsStub{ + JournalLenCalled: func() int { + return 1 }, - initMetaDataPool(), - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initStore(), - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + } + arguments.DataPool = initMetaDataPool() + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) haveTime := func() bool { return true } + hdr, err := mp.CreateBlockHeader(nil, 0, haveTime) assert.NotNil(t, err) assert.Nil(t, hdr) @@ -973,28 +800,20 @@ func TestMetaProcessor_CreateBlockHeaderShouldNotReturnNilWhenCreateShardInfoFai func TestMetaProcessor_CreateBlockHeaderShouldWork(t *testing.T) { t.Parallel() - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - JournalLenCalled: func() int { - return 0 - }, - RootHashCalled: func() ([]byte, error) { - return []byte("root"), nil - }, + arguments := createMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + JournalLenCalled: func() int { + return 0 }, - initMetaDataPool(), - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initStore(), - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - + RootHashCalled: func() ([]byte, error) { + return []byte("root"), nil + }, + } + arguments.DataPool = initMetaDataPool() + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) haveTime := func() bool { return true } + hdr, err := mp.CreateBlockHeader(nil, 0, haveTime) assert.Nil(t, err) assert.NotNil(t, hdr) @@ -1009,21 +828,15 @@ func TestMetaProcessor_CommitBlockShouldRevertAccountStateWhenErr(t *testing.T) journalEntries = 0 return nil } - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: revToSnapshot, - }, - initMetaDataPool(), - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initStore(), - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + + arguments := createMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: revToSnapshot, + } + arguments.DataPool = initMetaDataPool() + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) + err := mp.CommitBlock(nil, nil, nil) assert.NotNil(t, err) assert.Equal(t, 0, journalEntries) @@ -1032,20 +845,9 @@ func TestMetaProcessor_CommitBlockShouldRevertAccountStateWhenErr(t *testing.T) func TestMetaProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initStore(), - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) msh, mstx, err := mp.MarshalizedDataToBroadcast(&block.MetaBlock{}, &block.MetaBlockBody{}) assert.Nil(t, err) @@ -1055,44 +857,34 @@ func 
TestMetaProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { //------- receivedHeader -func TestMetaProcessor_ReceivedHeaderShouldEraseRequested(t *testing.T) { +func TestMetaProcessor_ReceivedHeaderShouldDecreaseMissing(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - pool, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.DataPool = pool + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) //add 3 tx hashes on requested list hdrHash1 := []byte("hdr hash 1") hdrHash2 := []byte("hdr hash 2") hdrHash3 := []byte("hdr hash 3") - mp.AddHdrHashToRequestedList(hdrHash1) - mp.AddHdrHashToRequestedList(hdrHash2) - mp.AddHdrHashToRequestedList(hdrHash3) + hdr2 := &block.Header{Nonce: 2} + + mp.AddHdrHashToRequestedList(nil, hdrHash1) + mp.AddHdrHashToRequestedList(nil, hdrHash2) + mp.AddHdrHashToRequestedList(nil, hdrHash3) //received txHash2 - hdr := &block.Header{Nonce: 1} - pool.ShardHeaders().Put(hdrHash2, hdr) - mp.ReceivedHeader(hdrHash2) + pool.ShardHeaders().Put(hdrHash2, hdr2) + + time.Sleep(100 * time.Millisecond) - assert.True(t, mp.IsHdrHashRequested(hdrHash1)) - assert.False(t, mp.IsHdrHashRequested(hdrHash2)) - assert.True(t, mp.IsHdrHashRequested(hdrHash3)) + assert.True(t, mp.IsHdrMissing(hdrHash1)) + assert.False(t, mp.IsHdrMissing(hdrHash2)) + assert.True(t, mp.IsHdrMissing(hdrHash3)) } //------- createShardInfo @@ -1100,12 +892,8 @@ func TestMetaProcessor_ReceivedHeaderShouldEraseRequested(t *testing.T) { func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotValid(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - //we will have a 3 hdrs in pool - hdrHash1 := []byte("hdr hash 1") hdrHash2 := []byte("hdr hash 2") hdrHash3 := []byte("hdr hash 3") @@ -1147,27 +935,22 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotValid(t *testing.T) ShardId: 2, MiniBlockHeaders: miniBlockHeaders3}) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + noOfShards := uint32(5) + arguments := createMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(5), - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(5)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.DataPool = pool + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) haveTime := func() bool { return true } round := uint64(10) @@ -1195,12 
+978,8 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotValid(t *testing.T) func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotFinal(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - //we will have a 3 hdrs in pool - hdrHash1 := []byte("hdr hash 1") hdrHash2 := []byte("hdr hash 2") hdrHash3 := []byte("hdr hash 3") @@ -1226,27 +1005,21 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotFinal(t *testing.T) miniBlockHeaders3 = append(miniBlockHeaders3, miniBlockHeader1) noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + arguments := createMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(5)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.DataPool = pool + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) haveTime := func() bool { return true } @@ -1282,7 +1055,7 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotFinal(t *testing.T) PrevHash: prevHash, MiniBlockHeaders: miniBlockHeaders3}) - mp.SetNextKValidity(0) + mp.SetShardBlockFinality(0) round := uint64(40) shardInfo, err := mp.CreateShardInfo(3, round, haveTime) assert.Nil(t, err) @@ -1308,12 +1081,8 @@ func TestMetaProcessor_CreateShardInfoShouldWorkNoHdrAddedNotFinal(t *testing.T) func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - //we will have a 3 hdrs in pool - hdrHash1 := []byte("hdr hash 1") hdrHash2 := []byte("hdr hash 2") hdrHash3 := []byte("hdr hash 3") @@ -1343,27 +1112,21 @@ func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { miniBlockHeaders3 = append(miniBlockHeaders3, miniBlockHeader1) noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + arguments := createMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.DataPool = pool + arguments.ShardCoordinator = 
mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) haveTime := func() bool { return true } @@ -1448,7 +1211,7 @@ func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { pool.ShardHeaders().Put(hdrHash3, headers[4]) pool.ShardHeaders().Put(hdrHash33, headers[5]) - mp.SetNextKValidity(1) + mp.SetShardBlockFinality(1) round := uint64(15) shardInfo, err := mp.CreateShardInfo(3, round, haveTime) assert.Nil(t, err) @@ -1474,12 +1237,8 @@ func TestMetaProcessor_CreateShardInfoShouldWorkHdrsAdded(t *testing.T) { func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - //we will have a 3 hdrs in pool - hdrHash1 := []byte("hdr hash 1") hdrHash2 := []byte("hdr hash 2") hdrHash3 := []byte("hdr hash 3") @@ -1509,27 +1268,21 @@ func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { miniBlockHeaders3 = append(miniBlockHeaders3, miniBlockHeader1) noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + arguments := createMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.DataPool = pool + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) haveTime := func() bool { return true } @@ -1614,7 +1367,7 @@ func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { pool.ShardHeaders().Put(hdrHash3, headers[4]) pool.ShardHeaders().Put(hdrHash33, headers[5]) - mp.SetNextKValidity(1) + mp.SetShardBlockFinality(1) round := uint64(20) shardInfo, err := mp.CreateShardInfo(3, round, haveTime) assert.Nil(t, err) @@ -1640,20 +1393,10 @@ func TestMetaProcessor_CreateShardInfoEmptyBlockHDRRoundTooHigh(t *testing.T) { func TestMetaProcessor_RestoreBlockIntoPoolsShouldErrNilMetaBlockHeader(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - &mock.MarshalizerMock{}, - initStore(), - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) + err := mp.RestoreBlockIntoPools(nil, nil) assert.NotNil(t, err) assert.Equal(t, err, process.ErrNilMetaBlockHeader) @@ -1664,8 +1407,6 @@ func 
TestMetaProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { pool := mock.NewMetaPoolsHolderFake() marshalizerMock := &mock.MarshalizerMock{} - hasherMock := &mock.HasherStub{} - body := &block.MetaBlockBody{} hdr := block.Header{Nonce: 1} buffHdr, _ := marshalizerMock.Marshal(hdr) @@ -1684,19 +1425,10 @@ func TestMetaProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { }, } - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - pool, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - hasherMock, - marshalizerMock, - store, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + arguments.DataPool = pool + arguments.Store = store + mp, _ := blproc.NewMetaProcessor(arguments) mhdr := createMetaBlockHeader() @@ -1710,32 +1442,24 @@ func TestMetaProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + arguments := createMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.Hasher = &mock.HasherMock{} + arguments.DataPool = pool + arguments.Store = initStore() + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + mp, _ := blproc.NewMetaProcessor(arguments) prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") @@ -1784,6 +1508,8 @@ func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { // wrong header type in pool and defer called pool.ShardHeaders().Put(currHash, metaHdr) pool.ShardHeaders().Put(prevHash, prevHdr) + mp.SetHdrForCurrentBlock(currHash, metaHdr, true) + mp.SetHdrForCurrentBlock(prevHash, prevHdr, true) err = mp.SaveLastNotarizedHeader(metaHdr) assert.Equal(t, process.ErrWrongTypeAssertion, err) @@ -1793,6 +1519,9 @@ func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { // put headers in pool pool.ShardHeaders().Put(currHash, currHdr) pool.ShardHeaders().Put(prevHash, prevHdr) + mp.CreateBlockStarted() + mp.SetHdrForCurrentBlock(currHash, currHdr, true) + mp.SetHdrForCurrentBlock(prevHash, prevHdr, true) err = mp.SaveLastNotarizedHeader(metaHdr) assert.Nil(t, err) @@ -1803,32 +1532,24 @@ func TestMetaProcessor_CreateLastNotarizedHdrs(t *testing.T) { func TestMetaProcessor_CheckShardHeadersValidity(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - 
&mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + arguments := createMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.Hasher = &mock.HasherMock{} + arguments.DataPool = pool + arguments.Store = initStore() + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + mp, _ := blproc.NewMetaProcessor(arguments) prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") @@ -1879,7 +1600,10 @@ func TestMetaProcessor_CheckShardHeadersValidity(t *testing.T) { shDataPrev := block.ShardData{ShardId: 0, HeaderHash: prevHash} metaHdr.ShardInfo = append(metaHdr.ShardInfo, shDataPrev) - _, err := mp.CheckShardHeadersValidity(metaHdr) + mp.SetHdrForCurrentBlock(wrongCurrHash, wrongCurrHdr, true) + mp.SetHdrForCurrentBlock(prevHash, prevHdr, true) + + _, err := mp.CheckShardHeadersValidity() assert.Equal(t, process.ErrWrongNonceInBlock, err) shDataCurr = block.ShardData{ShardId: 0, HeaderHash: currHash} @@ -1888,7 +1612,11 @@ func TestMetaProcessor_CheckShardHeadersValidity(t *testing.T) { shDataPrev = block.ShardData{ShardId: 0, HeaderHash: prevHash} metaHdr.ShardInfo = append(metaHdr.ShardInfo, shDataPrev) - highestNonceHdrs, err := mp.CheckShardHeadersValidity(metaHdr) + mp.CreateBlockStarted() + mp.SetHdrForCurrentBlock(currHash, currHdr, true) + mp.SetHdrForCurrentBlock(prevHash, prevHdr, true) + + highestNonceHdrs, err := mp.CheckShardHeadersValidity() assert.Nil(t, err) assert.NotNil(t, highestNonceHdrs) assert.Equal(t, currHdr.Nonce, highestNonceHdrs[currHdr.ShardId].GetNonce()) @@ -1897,32 +1625,23 @@ func TestMetaProcessor_CheckShardHeadersValidity(t *testing.T) { func TestMetaProcessor_CheckShardHeadersValidityWrongNonceFromLastNoted(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + arguments := createMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.DataPool = pool + arguments.Store = initStore() + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + 
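In the updated API exercised above, a test first registers each expected shard header via SetHdrForCurrentBlock and then calls CheckShardHeadersValidity with no arguments, so the processor validates whatever was registered for the block in progress instead of re-reading the metablock. A rough, self-contained sketch of that bookkeeping is given below; currentBlockHeaders, shardHeader and CheckValidity are hypothetical names, not the actual blproc implementation.

package currentblockexample

import (
	"errors"
	"sort"
	"sync"
)

// shardHeader is a minimal stand-in for a shard header handler.
type shardHeader struct {
	ShardID uint32
	Nonce   uint64
}

// hdrInfo pairs a registered header with its "proof received" flag.
type hdrInfo struct {
	hdr      *shardHeader
	hasProof bool
}

// currentBlockHeaders collects the shard headers referenced by the block
// being built, keyed by header hash.
type currentBlockHeaders struct {
	mut  sync.RWMutex
	hdrs map[string]hdrInfo
}

func newCurrentBlockHeaders() *currentBlockHeaders {
	return &currentBlockHeaders{hdrs: make(map[string]hdrInfo)}
}

// SetHdrForCurrentBlock registers a header for the block in progress,
// mirroring the call the tests above make before the validity check.
func (c *currentBlockHeaders) SetHdrForCurrentBlock(hash []byte, hdr *shardHeader, hasProof bool) {
	c.mut.Lock()
	c.hdrs[string(hash)] = hdrInfo{hdr: hdr, hasProof: hasProof}
	c.mut.Unlock()
}

// CheckValidity groups the registered headers per shard, sorts them by nonce
// and verifies they continue the last notarized nonce without gaps, returning
// the highest validated nonce per shard.
func (c *currentBlockHeaders) CheckValidity(lastNotarized map[uint32]uint64) (map[uint32]uint64, error) {
	c.mut.RLock()
	perShard := make(map[uint32][]uint64)
	for _, info := range c.hdrs {
		perShard[info.hdr.ShardID] = append(perShard[info.hdr.ShardID], info.hdr.Nonce)
	}
	c.mut.RUnlock()

	highest := make(map[uint32]uint64)
	for shardID, nonces := range perShard {
		sort.Slice(nonces, func(i, j int) bool { return nonces[i] < nonces[j] })
		expected := lastNotarized[shardID] + 1
		for _, nonce := range nonces {
			if nonce != expected {
				return nil, errors.New("wrong nonce in block")
			}
			expected++
		}
		highest[shardID] = nonces[len(nonces)-1]
	}
	return highest, nil
}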
arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + mp, _ := blproc.NewMetaProcessor(arguments) prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") @@ -1946,7 +1665,9 @@ func TestMetaProcessor_CheckShardHeadersValidityWrongNonceFromLastNoted(t *testi metaHdr.ShardInfo = make([]block.ShardData, 0) metaHdr.ShardInfo = append(metaHdr.ShardInfo, shDataCurr) - highestNonceHdrs, err := mp.CheckShardHeadersValidity(metaHdr) + mp.SetHdrForCurrentBlock(currHash, currHdr, true) + + highestNonceHdrs, err := mp.CheckShardHeadersValidity() assert.Nil(t, highestNonceHdrs) assert.Equal(t, process.ErrWrongNonceInBlock, err) } @@ -1954,32 +1675,24 @@ func TestMetaProcessor_CheckShardHeadersValidityWrongNonceFromLastNoted(t *testi func TestMetaProcessor_CheckShardHeadersValidityRoundZeroLastNoted(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + arguments := createMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.DataPool = pool + arguments.Store = initStore() + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + mp, _ := blproc.NewMetaProcessor(arguments) prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") @@ -2003,12 +1716,12 @@ func TestMetaProcessor_CheckShardHeadersValidityRoundZeroLastNoted(t *testing.T) metaHdr.ShardInfo = make([]block.ShardData, 0) metaHdr.ShardInfo = append(metaHdr.ShardInfo, shDataCurr) - highestNonceHdrs, err := mp.CheckShardHeadersValidity(metaHdr) - assert.Nil(t, highestNonceHdrs) - assert.Equal(t, process.ErrMissingHeader, err) + highestNonceHdrs, err := mp.CheckShardHeadersValidity() + assert.Equal(t, 0, len(highestNonceHdrs)) pool.ShardHeaders().Put(currHash, currHdr) - highestNonceHdrs, err = mp.CheckShardHeadersValidity(metaHdr) + mp.SetHdrForCurrentBlock(currHash, currHdr, true) + highestNonceHdrs, err = mp.CheckShardHeadersValidity() assert.NotNil(t, highestNonceHdrs) assert.Nil(t, err) assert.Equal(t, currHdr.Nonce, highestNonceHdrs[currHdr.ShardId].GetNonce()) @@ -2017,32 +1730,23 @@ func TestMetaProcessor_CheckShardHeadersValidityRoundZeroLastNoted(t *testing.T) func TestMetaProcessor_CheckShardHeadersFinality(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + 
arguments := createMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.DataPool = pool + arguments.Store = initStore() + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + mp, _ := blproc.NewMetaProcessor(arguments) prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") @@ -2083,29 +1787,33 @@ func TestMetaProcessor_CheckShardHeadersFinality(t *testing.T) { prevHash, _ = mp.ComputeHeaderHash(nextWrongHdr) pool.ShardHeaders().Put(prevHash, nextWrongHdr) - mp.SetNextKValidity(0) + mp.SetShardBlockFinality(0) metaHdr := &block.MetaBlock{Round: 1} highestNonceHdrs := make(map[uint32]data.HeaderHandler) for i := uint32(0); i < noOfShards; i++ { - highestNonceHdrs[i] = mp.LastNotarizedHdrForShard(i) + highestNonceHdrs[i] = nil } - err := mp.CheckShardHeadersFinality(nil, highestNonceHdrs) + err := mp.CheckShardHeadersFinality(highestNonceHdrs) assert.Equal(t, process.ErrNilBlockHeader, err) + for i := uint32(0); i < noOfShards; i++ { + highestNonceHdrs[i] = mp.LastNotarizedHdrForShard(i) + } + // should work for empty highest nonce hdrs - no hdrs added this round to metablock - err = mp.CheckShardHeadersFinality(metaHdr, nil) + err = mp.CheckShardHeadersFinality(nil) assert.Nil(t, err) - mp.SetNextKValidity(0) + mp.SetShardBlockFinality(0) highestNonceHdrs = make(map[uint32]data.HeaderHandler, 0) highestNonceHdrs[0] = currHdr - err = mp.CheckShardHeadersFinality(metaHdr, highestNonceHdrs) + err = mp.CheckShardHeadersFinality(highestNonceHdrs) assert.Nil(t, err) - mp.SetNextKValidity(1) - err = mp.CheckShardHeadersFinality(metaHdr, highestNonceHdrs) + mp.SetShardBlockFinality(1) + err = mp.CheckShardHeadersFinality(highestNonceHdrs) assert.Equal(t, process.ErrHeaderNotFinal, err) prevHash, _ = mp.ComputeHeaderHash(currHdr) @@ -2118,43 +1826,35 @@ func TestMetaProcessor_CheckShardHeadersFinality(t *testing.T) { PrevHash: prevHash, RootHash: []byte("currRootHash")} - prevHash, _ = mp.ComputeHeaderHash(nextHdr) - pool.ShardHeaders().Put(prevHash, nextHdr) + nextHash, _ := mp.ComputeHeaderHash(nextHdr) + pool.ShardHeaders().Put(nextHash, nextHdr) + mp.SetHdrForCurrentBlock(nextHash, nextHdr, false) metaHdr.Round = 20 - err = mp.CheckShardHeadersFinality(metaHdr, highestNonceHdrs) + err = mp.CheckShardHeadersFinality(highestNonceHdrs) assert.Nil(t, err) } func TestMetaProcessor_IsHdrConstructionValid(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + arguments := createMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been 
called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.DataPool = pool + arguments.Store = initStore() + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + mp, _ := blproc.NewMetaProcessor(arguments) prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") @@ -2197,7 +1897,7 @@ func TestMetaProcessor_IsHdrConstructionValid(t *testing.T) { currHdr.Nonce = 0 prevHdr.Nonce = 0 err = mp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrRootStateMissmatch) + assert.Equal(t, err, process.ErrRootStateDoesNotMatch) currHdr.Nonce = 0 prevHdr.Nonce = 0 @@ -2209,7 +1909,7 @@ func TestMetaProcessor_IsHdrConstructionValid(t *testing.T) { prevHdr.Nonce = 45 prevHdr.Round = currHdr.Round + 1 err = mp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrLowerRoundInOtherChainBlock) + assert.Equal(t, err, process.ErrLowerRoundInBlock) prevHdr.Round = currHdr.Round - 1 currHdr.Nonce = prevHdr.Nonce + 2 @@ -2217,16 +1917,17 @@ func TestMetaProcessor_IsHdrConstructionValid(t *testing.T) { assert.Equal(t, err, process.ErrWrongNonceInBlock) currHdr.Nonce = prevHdr.Nonce + 1 - prevHdr.RandSeed = []byte("randomwrong") + currHdr.PrevHash = []byte("wronghash") err = mp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrRandSeedMismatch) + assert.Equal(t, err, process.ErrBlockHashDoesNotMatch) - prevHdr.RandSeed = currRandSeed - currHdr.PrevHash = []byte("wronghash") + prevHdr.RandSeed = []byte("randomwrong") + currHdr.PrevHash, _ = mp.ComputeHeaderHash(prevHdr) err = mp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrHashDoesNotMatchInOtherChainBlock) + assert.Equal(t, err, process.ErrRandSeedDoesNotMatch) currHdr.PrevHash = prevHash + prevHdr.RandSeed = currRandSeed prevHdr.RootHash = []byte("prevRootHash") err = mp.IsHdrConstructionValid(currHdr, prevHdr) assert.Nil(t, err) @@ -2235,32 +1936,24 @@ func TestMetaProcessor_IsHdrConstructionValid(t *testing.T) { func TestMetaProcessor_IsShardHeaderValidFinal(t *testing.T) { t.Parallel() - hasher := mock.HasherMock{} - marshalizer := &mock.MarshalizerMock{} pool := mock.NewMetaPoolsHolderFake() - noOfShards := uint32(5) - mp, _ := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{ - RevertToSnapshotCalled: func(snapshot int) error { - assert.Fail(t, "revert should have not been called") - return nil - }, - JournalLenCalled: func() int { - return 0 - }, + + arguments := createMockMetaArguments() + arguments.Accounts = &mock.AccountsStub{ + RevertToSnapshotCalled: func(snapshot int) error { + assert.Fail(t, "revert should have not been called") + return nil }, - pool, - &mock.ForkDetectorMock{}, - mock.NewMultiShardsCoordinatorMock(noOfShards), - hasher, - marshalizer, - initStore(), - createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + JournalLenCalled: func() int { + return 0 + }, + } + arguments.DataPool = pool + arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(noOfShards) + arguments.StartHeaders = 
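The IsHdrConstructionValid assertions above walk the renamed errors in a fixed order: round, nonce, previous hash, previous rand seed. A dependency-free sketch of that chain of checks is shown below; header, hash and isConstructionValid are hypothetical stand-ins with a toy hash, not the real header handler or marshalizer-based hashing.

package hdrvalidityexample

import (
	"bytes"
	"crypto/sha256"
	"encoding/binary"
	"errors"
)

var (
	errLowerRound       = errors.New("lower round in block")
	errWrongNonce       = errors.New("wrong nonce in block")
	errHashMismatch     = errors.New("block hash does not match")
	errRandSeedMismatch = errors.New("rand seed does not match")
)

// header is a minimal stand-in for a block header.
type header struct {
	Round        uint64
	Nonce        uint64
	PrevHash     []byte
	PrevRandSeed []byte
	RandSeed     []byte
}

// hash derives a toy header hash; the real code marshals the header and
// hashes the resulting bytes.
func hash(h *header) []byte {
	buf := make([]byte, 16)
	binary.BigEndian.PutUint64(buf[:8], h.Round)
	binary.BigEndian.PutUint64(buf[8:], h.Nonce)
	sum := sha256.Sum256(append(buf, h.RandSeed...))
	return sum[:]
}

// isConstructionValid applies the same ordering of checks the test above
// exercises: round must advance, nonce must increment, the previous hash and
// previous rand seed must link back to the prior header.
func isConstructionValid(curr, prev *header) error {
	if curr.Round <= prev.Round {
		return errLowerRound
	}
	if curr.Nonce != prev.Nonce+1 {
		return errWrongNonce
	}
	if !bytes.Equal(curr.PrevHash, hash(prev)) {
		return errHashMismatch
	}
	if !bytes.Equal(curr.PrevRandSeed, prev.RandSeed) {
		return errRandSeedMismatch
	}
	return nil
}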
createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(noOfShards)) + arguments.Store = initStore() + mp, _ := blproc.NewMetaProcessor(arguments) prevRandSeed := []byte("prevrand") currRandSeed := []byte("currrand") @@ -2317,12 +2010,12 @@ func TestMetaProcessor_IsShardHeaderValidFinal(t *testing.T) { assert.False(t, valid) assert.Nil(t, hdrIds) - mp.SetNextKValidity(0) + mp.SetShardBlockFinality(0) valid, hdrIds = mp.IsShardHeaderValidFinal(currHdr, prevHdr, srtShardHdrs) assert.True(t, valid) assert.NotNil(t, hdrIds) - mp.SetNextKValidity(1) + mp.SetShardBlockFinality(1) nextWrongHdr := &block.Header{ Round: 12, Nonce: 44, @@ -2355,21 +2048,10 @@ func TestMetaProcessor_IsShardHeaderValidFinal(t *testing.T) { func TestMetaProcessor_DecodeBlockBody(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() + marshalizerMock := &mock.MarshalizerMock{} - mp, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - marshalizerMock, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + mp, _ := blproc.NewMetaProcessor(arguments) body := &block.MetaBlockBody{} message, err := marshalizerMock.Marshal(body) assert.Nil(t, err) @@ -2383,21 +2065,10 @@ func TestMetaProcessor_DecodeBlockBody(t *testing.T) { func TestMetaProcessor_DecodeBlockHeader(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() + marshalizerMock := &mock.MarshalizerMock{} - mp, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - marshalizerMock, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) + arguments := createMockMetaArguments() + mp, _ := blproc.NewMetaProcessor(arguments) hdr := &block.MetaBlock{} hdr.Nonce = 1 hdr.TimeStamp = uint64(0) @@ -2419,24 +2090,8 @@ func TestMetaProcessor_DecodeBlockHeader(t *testing.T) { func TestMetaProcessor_UpdateShardsHeadersNonce_ShouldWork(t *testing.T) { t.Parallel() - mdp := initMetaDataPool() - marshalizerMock := &mock.MarshalizerMock{} - mp, err := blproc.NewMetaProcessor( - &mock.ServiceContainerMock{}, - &mock.AccountsStub{}, - mdp, - &mock.ForkDetectorMock{}, - mock.NewOneShardCoordinatorMock(), - &mock.HasherStub{}, - marshalizerMock, - &mock.ChainStorerMock{}, - createGenesisBlocks(mock.NewOneShardCoordinatorMock()), - &mock.RequestHandlerMock{}, - &mock.Uint64ByteSliceConverterMock{}, - ) - if err != nil { - assert.NotNil(t, err) - } + arguments := createMockMetaArguments() + mp, _ := blproc.NewMetaProcessor(arguments) numberOfShards := uint32(4) type DataForMap struct { diff --git a/process/block/metrics.go b/process/block/metrics.go index 72e3b7656ae..9d031db97e4 100644 --- a/process/block/metrics.go +++ b/process/block/metrics.go @@ -1,9 +1,14 @@ package block import ( + "time" + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/core/indexer" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/sharding" ) func getMetricsFromMetaHeader( @@ -70,3 +75,67 @@ func getMetricsFromHeader( 
appStatusHandler.SetUInt64Value(core.MetricTxPoolLoad, numTxWithDst) appStatusHandler.SetUInt64Value(core.MetricNumProcessedTxs, uint64(totalTx)) } + +func saveRoundInfoInElastic( + elasticIndexer indexer.Indexer, + nodesCoordinator sharding.NodesCoordinator, + shardId uint32, + header data.HeaderHandler, + lastHeader data.HeaderHandler, + signersIndexes []uint64, +) { + roundInfo := indexer.RoundInfo{ + Index: header.GetRound(), + SignersIndexes: signersIndexes, + BlockWasProposed: true, + ShardId: shardId, + Timestamp: time.Duration(header.GetTimeStamp()), + } + + go elasticIndexer.SaveRoundInfo(roundInfo) + + if lastHeader == nil { + return + } + + lastBlockRound := lastHeader.GetRound() + currentBlockRound := header.GetRound() + roundDuration := calculateRoundDuration(lastHeader.GetTimeStamp(), header.GetTimeStamp(), lastBlockRound, currentBlockRound) + for i := lastBlockRound + 1; i < currentBlockRound; i++ { + publicKeys, err := nodesCoordinator.GetValidatorsPublicKeys(lastHeader.GetRandSeed(), i, shardId) + if err != nil { + continue + } + signersIndexes = nodesCoordinator.GetValidatorsIndexes(publicKeys) + roundInfo = indexer.RoundInfo{ + Index: i, + SignersIndexes: signersIndexes, + BlockWasProposed: false, + ShardId: shardId, + Timestamp: time.Duration(header.GetTimeStamp() - ((currentBlockRound - i) * roundDuration)), + } + + go elasticIndexer.SaveRoundInfo(roundInfo) + } +} + +func calculateRoundDuration( + lastBlockTimestamp uint64, + currentBlockTimestamp uint64, + lastBlockRound uint64, + currentBlockRound uint64, +) uint64 { + if lastBlockTimestamp >= currentBlockTimestamp { + log.Error("last block timestamp is greater or equals than current block timestamp") + return 0 + } + if lastBlockRound >= currentBlockRound { + log.Error("last block round is greater or equals than current block round") + return 0 + } + + diffTimeStamp := currentBlockTimestamp - lastBlockTimestamp + diffRounds := currentBlockRound - lastBlockRound + + return diffTimeStamp / diffRounds +} diff --git a/process/block/metrics_test.go b/process/block/metrics_test.go new file mode 100644 index 00000000000..64bbe78822a --- /dev/null +++ b/process/block/metrics_test.go @@ -0,0 +1,20 @@ +package block + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMetrics_CalculateRoundDuration(t *testing.T) { + t.Parallel() + + lastBlockTimestamp := uint64(80) + currentBlockTimestamp := uint64(100) + lastBlockRound := uint64(5) + currentBlockRound := uint64(10) + expectedRoundDuration := uint64(4) + + roundDuration := calculateRoundDuration(lastBlockTimestamp, currentBlockTimestamp, lastBlockRound, currentBlockRound) + assert.Equal(t, expectedRoundDuration, roundDuration) +} diff --git a/process/block/poolsCleaner/txPoolsCleaner.go b/process/block/poolsCleaner/txPoolsCleaner.go new file mode 100644 index 00000000000..6e5f5aa4ea4 --- /dev/null +++ b/process/block/poolsCleaner/txPoolsCleaner.go @@ -0,0 +1,146 @@ +package poolsCleaner + +import ( + "sync/atomic" + "time" + + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// TxPoolsCleaner represents a pools cleaner that check if a transaction should be in pool +type TxPoolsCleaner struct { + accounts state.AccountsAdapter + shardCoordinator sharding.Coordinator + dataPool dataRetriever.PoolsHolder + addrConverter state.AddressConverter + 
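calculateRoundDuration above averages the elapsed time over the elapsed rounds, and saveRoundInfoInElastic uses that average to back-fill a timestamp for every round in which no block was proposed. The following is a small self-contained sketch of that arithmetic in plain Go, without the indexer or sharding dependencies; the function and variable names are illustrative only.

package main

import "fmt"

// roundDuration returns the average seconds per round between two blocks,
// or 0 when the inputs are not strictly increasing (mirroring the guards above).
func roundDuration(lastTimestamp, currTimestamp, lastRound, currRound uint64) uint64 {
	if lastTimestamp >= currTimestamp || lastRound >= currRound {
		return 0
	}
	return (currTimestamp - lastTimestamp) / (currRound - lastRound)
}

// missedRoundTimestamps estimates a timestamp for every round strictly
// between the last proposed block and the current one, the way the
// back-fill loop above does before indexing each skipped round.
func missedRoundTimestamps(lastTimestamp, currTimestamp, lastRound, currRound uint64) map[uint64]uint64 {
	duration := roundDuration(lastTimestamp, currTimestamp, lastRound, currRound)
	estimates := make(map[uint64]uint64)
	for round := lastRound + 1; round < currRound; round++ {
		estimates[round] = currTimestamp - (currRound-round)*duration
	}
	return estimates
}

func main() {
	// Same numbers as the unit test above: 20 seconds spread over 5 rounds.
	fmt.Println(roundDuration(80, 100, 5, 10))         // 4
	fmt.Println(missedRoundTimestamps(80, 100, 5, 10)) // rounds 6..9 -> 84, 88, 92, 96
}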
numRemovedTxs uint64 + canDoClean chan struct{} +} + +// NewTxsPoolsCleaner will return a new transaction pools cleaner +func NewTxsPoolsCleaner( + accounts state.AccountsAdapter, + shardCoordinator sharding.Coordinator, + dataPool dataRetriever.PoolsHolder, + addrConverter state.AddressConverter, +) (*TxPoolsCleaner, error) { + if accounts == nil || accounts.IsInterfaceNil() { + return nil, process.ErrNilAccountsAdapter + } + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if dataPool == nil || dataPool.IsInterfaceNil() { + return nil, process.ErrNilDataPoolHolder + } + transactionPool := dataPool.Transactions() + if transactionPool == nil || transactionPool.IsInterfaceNil() { + return nil, process.ErrNilTransactionPool + } + if addrConverter == nil || addrConverter.IsInterfaceNil() { + return nil, process.ErrNilAddressConverter + } + + canDoClean := make(chan struct{}, 1) + + return &TxPoolsCleaner{ + accounts: accounts, + shardCoordinator: shardCoordinator, + dataPool: dataPool, + addrConverter: addrConverter, + numRemovedTxs: 0, + canDoClean: canDoClean, + }, nil +} + +// Clean will check if in pools exits transactions with nonce low that transaction sender account nonce +// and if tx have low nonce will be removed from pools +func (tpc *TxPoolsCleaner) Clean(duration time.Duration) (bool, error) { + if duration == 0 { + return false, process.ErrZeroMaxCleanTime + } + + select { + case tpc.canDoClean <- struct{}{}: + startTime := time.Now() + haveTime := func() bool { + return time.Now().Sub(startTime) < duration + } + + tpc.cleanPools(haveTime) + <-tpc.canDoClean + + return true, nil + default: + return false, nil + } +} + +func (tpc *TxPoolsCleaner) cleanPools(haveTime func() bool) { + shardId := tpc.shardCoordinator.SelfId() + transactions := tpc.dataPool.Transactions() + numOfShards := tpc.shardCoordinator.NumberOfShards() + + for destShardId := uint32(0); destShardId < numOfShards; destShardId++ { + cacherId := process.ShardCacherIdentifier(shardId, destShardId) + txsPool := transactions.ShardDataStore(cacherId) + + for _, key := range txsPool.Keys() { + if !haveTime() { + return + } + + obj, ok := txsPool.Peek(key) + if !ok { + continue + } + + tx, ok := obj.(*transaction.Transaction) + if !ok { + atomic.AddUint64(&tpc.numRemovedTxs, 1) + txsPool.Remove(key) + continue + } + + sndAddr := tx.GetSndAddress() + addr, err := tpc.addrConverter.CreateAddressFromPublicKeyBytes(sndAddr) + if err != nil { + txsPool.Remove(key) + atomic.AddUint64(&tpc.numRemovedTxs, 1) + continue + } + + accountHandler, err := tpc.accounts.GetExistingAccount(addr) + if err != nil { + txsPool.Remove(key) + atomic.AddUint64(&tpc.numRemovedTxs, 1) + continue + } + + accountNonce := accountHandler.GetNonce() + txNonce := tx.Nonce + lowerNonceInTx := txNonce < accountNonce + if lowerNonceInTx { + txsPool.Remove(key) + atomic.AddUint64(&tpc.numRemovedTxs, 1) + } + } + } +} + +// NumRemovedTxs will return the number of removed txs from pools +func (tpc *TxPoolsCleaner) NumRemovedTxs() uint64 { + return atomic.LoadUint64(&tpc.numRemovedTxs) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (tpc *TxPoolsCleaner) IsInterfaceNil() bool { + if tpc == nil { + return true + } + return false +} diff --git a/process/block/poolsCleaner/txPoolsCleaner_test.go b/process/block/poolsCleaner/txPoolsCleaner_test.go new file mode 100644 index 00000000000..863f9d0ce1b --- /dev/null +++ 
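Clean above uses a one-slot channel as a non-blocking lock: a second concurrent call returns false immediately instead of waiting, and the sweep itself stops as soon as the time budget expires. Eviction is driven by the sender's account nonce: any pooled transaction whose nonce is already below it can never be executed and is dropped. A condensed, dependency-free sketch of the same pattern follows; cleaner, poolTx and the accountNonce callback are hypothetical stand-ins for the AccountsAdapter and the sharded cacher.

package poolcleanerexample

import (
	"errors"
	"sync/atomic"
	"time"
)

var errZeroMaxCleanTime = errors.New("zero max clean time")

// poolTx is a stand-in for a pooled transaction.
type poolTx struct {
	Nonce  uint64
	Sender string
}

// cleaner mimics the TxPoolsCleaner above with a plain map and a callback
// instead of the accounts adapter and the sharded data pool.
type cleaner struct {
	accountNonce func(sender string) (uint64, error)
	pool         map[string]*poolTx
	numRemoved   uint64
	canDoClean   chan struct{}
}

func newCleaner(accountNonce func(string) (uint64, error), pool map[string]*poolTx) *cleaner {
	return &cleaner{
		accountNonce: accountNonce,
		pool:         pool,
		canDoClean:   make(chan struct{}, 1), // one-slot channel acts as a non-blocking lock
	}
}

// Clean runs at most one sweep at a time; a concurrent call returns false
// immediately, and the sweep stops when the duration budget runs out.
func (c *cleaner) Clean(duration time.Duration) (bool, error) {
	if duration == 0 {
		return false, errZeroMaxCleanTime
	}
	select {
	case c.canDoClean <- struct{}{}:
		start := time.Now()
		c.sweep(func() bool { return time.Since(start) < duration })
		<-c.canDoClean
		return true, nil
	default:
		return false, nil // another sweep is already in progress
	}
}

// sweep drops every transaction whose nonce is below the sender's account
// nonce, counting removals the way numRemovedTxs does above.
func (c *cleaner) sweep(haveTime func() bool) {
	for hash, tx := range c.pool {
		if !haveTime() {
			return
		}
		accountNonce, err := c.accountNonce(tx.Sender)
		if err != nil || tx.Nonce < accountNonce {
			delete(c.pool, hash)
			atomic.AddUint64(&c.numRemoved, 1)
		}
	}
}

// NumRemoved reports how many transactions were evicted so far.
func (c *cleaner) NumRemoved() uint64 {
	return atomic.LoadUint64(&c.numRemoved)
}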
b/process/block/poolsCleaner/txPoolsCleaner_test.go @@ -0,0 +1,288 @@ +package poolsCleaner_test + +import ( + "bytes" + "math/big" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/data/smartContractResult" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/state/addressConverters" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/block/poolsCleaner" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/stretchr/testify/assert" +) + +func getAccAdapter(nonce uint64, balance *big.Int) *mock.AccountsStub { + accDB := &mock.AccountsStub{} + accDB.GetExistingAccountCalled = func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { + return &state.Account{Nonce: nonce, Balance: balance}, nil + } + + return accDB +} + +func initDataPoolWithFourTransactions() *mock.PoolsHolderStub { + delayedFetchingKey := "key1" + validTxKey := "key2" + invalidTxKey := "key3" + + return &mock.PoolsHolderStub{ + TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + switch string(key) { + case delayedFetchingKey: + time.Sleep(time.Second) + return &transaction.Transaction{Nonce: 10}, true + case validTxKey: + return &transaction.Transaction{ + Nonce: 10, + SndAddr: []byte("address_address_address_address_"), + }, true + case invalidTxKey: + return &smartContractResult.SmartContractResult{}, true + default: + return nil, false + } + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte(delayedFetchingKey), []byte(validTxKey), []byte(invalidTxKey), []byte("key4")} + }, + LenCalled: func() int { + return 0 + }, + RemoveCalled: func(key []byte) { + return + }, + } + }, + } + }, + } +} + +func initDataPool(testHash []byte) *mock.PoolsHolderStub { + sdp := &mock.PoolsHolderStub{ + TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if bytes.Equal(key, testHash) { + return &transaction.Transaction{Nonce: 10}, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + } + }, + } + return sdp +} + +func TestNewTxsPoolsCleaner_NilAccountsShouldErr(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewOneShardCoordinatorMock() + tdp := initDataPool([]byte("test")) + addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, err := poolsCleaner.NewTxsPoolsCleaner(nil, shardCoordinator, tdp, addrConverter) + + assert.Nil(t, txsPoolsCleaner) + assert.Equal(t, process.ErrNilAccountsAdapter, err) +} + +func TestNewTxsPoolsCleaner_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + nonce := uint64(1) + balance := big.NewInt(1) + accounts := getAccAdapter(nonce, balance) + tdp := initDataPool([]byte("test")) + addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, err := 
poolsCleaner.NewTxsPoolsCleaner(accounts, nil, tdp, addrConverter) + + assert.Nil(t, txsPoolsCleaner) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewTxsPoolsCleaner_NilDataPoolShouldErr(t *testing.T) { + t.Parallel() + + nonce := uint64(1) + balance := big.NewInt(1) + accounts := getAccAdapter(nonce, balance) + shardCoordinator := mock.NewOneShardCoordinatorMock() + addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, err := poolsCleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, nil, addrConverter) + + assert.Nil(t, txsPoolsCleaner) + assert.Equal(t, process.ErrNilDataPoolHolder, err) +} + +func TestNewTxsPoolsCleaner_NilTransactionPoolShouldErr(t *testing.T) { + t.Parallel() + + nonce := uint64(1) + balance := big.NewInt(1) + accounts := getAccAdapter(nonce, balance) + shardCoordinator := mock.NewOneShardCoordinatorMock() + tdp := &mock.PoolsHolderStub{ + TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return nil + }, + } + addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, err := poolsCleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) + + assert.Nil(t, txsPoolsCleaner) + assert.Equal(t, process.ErrNilTransactionPool, err) +} + +func TestNewTxsPoolsCleaner_NilAddressConverterShouldErr(t *testing.T) { + t.Parallel() + + nonce := uint64(1) + balance := big.NewInt(1) + accounts := getAccAdapter(nonce, balance) + shardCoordinator := mock.NewOneShardCoordinatorMock() + tdp := initDataPool([]byte("test")) + txsPoolsCleaner, err := poolsCleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, nil) + + assert.Nil(t, txsPoolsCleaner) + assert.Equal(t, process.ErrNilAddressConverter, err) +} + +func TestNewTxsPoolsCleaner_ShouldWork(t *testing.T) { + t.Parallel() + + nonce := uint64(1) + balance := big.NewInt(1) + accounts := getAccAdapter(nonce, balance) + shardCoordinator := mock.NewOneShardCoordinatorMock() + tdp := initDataPool([]byte("test")) + addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, err := poolsCleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) + + assert.NotNil(t, txsPoolsCleaner) + assert.Nil(t, err) +} + +func TestTxPoolsCleaner_CleanNilSenderAddrShouldRemoveTx(t *testing.T) { + t.Parallel() + + maxCleanTime := time.Second + nonce := uint64(1) + balance := big.NewInt(1) + accounts := getAccAdapter(nonce, balance) + shardCoordinator := mock.NewOneShardCoordinatorMock() + tdp := initDataPoolWithFourTransactions() + addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, _ := poolsCleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) + + itRan, err := txsPoolsCleaner.Clean(maxCleanTime) + assert.Nil(t, err) + assert.Equal(t, true, itRan) + + numRemovedTxs := txsPoolsCleaner.NumRemovedTxs() + assert.Equal(t, uint64(1), numRemovedTxs) +} + +func TestTxPoolsCleaner_CleanAccountNotExistsShouldRemoveTx(t *testing.T) { + t.Parallel() + + numRemovedTxsExpected := uint64(3) + cleanDuration := 2 * time.Second + accounts := &mock.AccountsStub{ + GetExistingAccountCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { + return nil, state.ErrAccNotFound + }, + } + shardCoordinator := mock.NewOneShardCoordinatorMock() + tdp := initDataPoolWithFourTransactions() + addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, _ := 
poolsCleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) + + itRan, err := txsPoolsCleaner.Clean(cleanDuration) + assert.Nil(t, err) + assert.Equal(t, true, itRan) + + numRemovedTxs := txsPoolsCleaner.NumRemovedTxs() + assert.Equal(t, numRemovedTxsExpected, numRemovedTxs) +} + +func TestTxPoolsCleaner_CleanLowerAccountNonceShouldRemoveTx(t *testing.T) { + t.Parallel() + + numRemovedTxsExpected := uint64(3) + cleanDuration := 2 * time.Second + nonce := uint64(11) + balance := big.NewInt(1) + accounts := getAccAdapter(nonce, balance) + shardCoordinator := mock.NewOneShardCoordinatorMock() + tdp := initDataPoolWithFourTransactions() + addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, _ := poolsCleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) + + itRan, err := txsPoolsCleaner.Clean(cleanDuration) + assert.Nil(t, err) + assert.Equal(t, true, itRan) + + numRemovedTxs := txsPoolsCleaner.NumRemovedTxs() + assert.Equal(t, numRemovedTxsExpected, numRemovedTxs) +} + +func TestTxPoolsCleaner_CleanNilHaveTimeShouldErr(t *testing.T) { + t.Parallel() + + nonce := uint64(11) + balance := big.NewInt(1) + accounts := getAccAdapter(nonce, balance) + shardCoordinator := mock.NewOneShardCoordinatorMock() + tdp := initDataPoolWithFourTransactions() + addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, _ := poolsCleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) + + itRan, err := txsPoolsCleaner.Clean(0) + assert.Equal(t, process.ErrZeroMaxCleanTime, err) + assert.Equal(t, false, itRan) +} + +func TestTxPoolsCleaner_CleanWillDoNothingIfIsCalledMultipleTime(t *testing.T) { + t.Parallel() + + nonce := uint64(1) + balance := big.NewInt(1) + accounts := getAccAdapter(nonce, balance) + shardCoordinator := mock.NewOneShardCoordinatorMock() + tdp := initDataPoolWithFourTransactions() + addrConverter, _ := addressConverters.NewPlainAddressConverter(32, "0x") + txsPoolsCleaner, _ := poolsCleaner.NewTxsPoolsCleaner(accounts, shardCoordinator, tdp, addrConverter) + + go func() { + _, _ = txsPoolsCleaner.Clean(time.Second) + }() + time.Sleep(time.Millisecond) + go func() { + itRan, _ := txsPoolsCleaner.Clean(time.Second) + assert.Equal(t, false, itRan) + }() + + time.Sleep(2 * time.Second) +} diff --git a/process/block/preprocess/basePreProcess.go b/process/block/preprocess/basePreProcess.go index 1c3d0426a6e..d3b07091c0b 100644 --- a/process/block/preprocess/basePreProcess.go +++ b/process/block/preprocess/basePreProcess.go @@ -71,21 +71,6 @@ func (bpp *basePreProcess) removeDataFromPools(body block.Body, miniBlockPool st return nil } -func (bpp *basePreProcess) restoreMiniBlock( - miniBlock *block.MiniBlock, - miniBlockHash []byte, - miniBlockPool storage.Cacher, -) []byte { - - miniBlockPool.Put(miniBlockHash, miniBlock) - //TODO: Analyze what is the scope of this check and return besides tests. 
Refactor this method - if miniBlock.SenderShardID != bpp.shardCoordinator.SelfId() { - return miniBlockHash - } - - return nil -} - func (bpp *basePreProcess) createMarshalizedData(txHashes [][]byte, forBlock *txsForBlock) ([][]byte, error) { mrsTxs := make([][]byte, 0) for _, txHash := range txHashes { @@ -178,9 +163,10 @@ func (bpp *basePreProcess) computeExistingAndMissing( currType block.Type, txPool dataRetriever.ShardedDataCacherNotifier, ) map[uint32][]*txsHashesInfo { + missingTxsForShard := make(map[uint32][]*txsHashesInfo, 0) - forBlock.mutTxsForBlock.Lock() + forBlock.mutTxsForBlock.Lock() for i := 0; i < len(body); i++ { miniBlock := body[i] if miniBlock.Type != currType { @@ -192,18 +178,19 @@ func (bpp *basePreProcess) computeExistingAndMissing( for j := 0; j < len(miniBlock.TxHashes); j++ { txHash := miniBlock.TxHashes[j] - tx, _ := process.GetTransactionHandlerFromPool( + tx, err := process.GetTransactionHandlerFromPool( miniBlock.SenderShardID, miniBlock.ReceiverShardID, txHash, txPool) - if tx == nil || tx.IsInterfaceNil() { + if err != nil { txHashes = append(txHashes, txHash) forBlock.missingTxs++ - } else { - forBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: tx, txShardInfo: txShardInfo} + continue } + + forBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: tx, txShardInfo: txShardInfo} } if len(txHashes) > 0 { @@ -211,7 +198,6 @@ func (bpp *basePreProcess) computeExistingAndMissing( &txsHashesInfo{txHashes: txHashes, receiverShardID: miniBlock.ReceiverShardID}) } } - forBlock.mutTxsForBlock.Unlock() return missingTxsForShard diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go new file mode 100644 index 00000000000..a9a7b07e421 --- /dev/null +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -0,0 +1,547 @@ +package preprocess + +import ( + "fmt" + "time" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" +) + +type rewardTxPreprocessor struct { + *basePreProcess + chReceivedAllRewardTxs chan bool + onRequestRewardTx func(shardID uint32, txHashes [][]byte) + rewardTxsForBlock txsForBlock + rewardTxPool dataRetriever.ShardedDataCacherNotifier + storage dataRetriever.StorageService + rewardsProcessor process.RewardTransactionProcessor + rewardsProducer process.InternalTransactionProducer + accounts state.AccountsAdapter +} + +// NewRewardTxPreprocessor creates a new reward transaction preprocessor object +func NewRewardTxPreprocessor( + rewardTxDataPool dataRetriever.ShardedDataCacherNotifier, + store dataRetriever.StorageService, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, + rewardProcessor process.RewardTransactionProcessor, + rewardProducer process.InternalTransactionProducer, + shardCoordinator sharding.Coordinator, + accounts state.AccountsAdapter, + onRequestRewardTransaction func(shardID uint32, txHashes [][]byte), +) (*rewardTxPreprocessor, error) { + + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, 
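The basePreProcess change above switches computeExistingAndMissing from a nil check to the error returned by GetTransactionHandlerFromPool when deciding whether a hash is missing, and keeps the per-sender-shard grouping used later for requests. A minimal sketch of that existing/missing split over a block body is given below; miniBlock, lookupFunc and computeExistingAndMissing are hypothetical names, not the real dataRetriever API.

package preprocessexample

import "errors"

var errTxNotFound = errors.New("transaction not found in pool")

// miniBlock is a minimal stand-in carrying only what the split needs.
type miniBlock struct {
	SenderShardID   uint32
	ReceiverShardID uint32
	TxHashes        [][]byte
}

// lookupFunc mimics a pool lookup that returns an error when the hash is absent.
type lookupFunc func(sender, receiver uint32, hash []byte) (interface{}, error)

// computeExistingAndMissing walks the miniblocks and, per sender shard,
// collects the hashes the pool could not resolve; everything else is kept
// in the "existing" map for later processing.
func computeExistingAndMissing(body []*miniBlock, lookup lookupFunc) (existing map[string]interface{}, missing map[uint32][][]byte) {
	existing = make(map[string]interface{})
	missing = make(map[uint32][][]byte)

	for _, mb := range body {
		for _, hash := range mb.TxHashes {
			tx, err := lookup(mb.SenderShardID, mb.ReceiverShardID, hash)
			if err != nil {
				missing[mb.SenderShardID] = append(missing[mb.SenderShardID], hash)
				continue
			}
			existing[string(hash)] = tx
		}
	}
	return existing, missing
}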
process.ErrNilMarshalizer + } + if rewardTxDataPool == nil || rewardTxDataPool.IsInterfaceNil() { + return nil, process.ErrNilRewardTxDataPool + } + if store == nil || store.IsInterfaceNil() { + return nil, process.ErrNilStorage + } + if rewardProcessor == nil || rewardProcessor.IsInterfaceNil() { + return nil, process.ErrNilRewardsTxProcessor + } + if rewardProducer == nil || rewardProcessor.IsInterfaceNil() { + return nil, process.ErrNilInternalTransactionProducer + } + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if accounts == nil || accounts.IsInterfaceNil() { + return nil, process.ErrNilAccountsAdapter + } + if onRequestRewardTransaction == nil { + return nil, process.ErrNilRequestHandler + } + + bpp := &basePreProcess{ + hasher: hasher, + marshalizer: marshalizer, + shardCoordinator: shardCoordinator, + } + + rtp := &rewardTxPreprocessor{ + basePreProcess: bpp, + storage: store, + rewardTxPool: rewardTxDataPool, + onRequestRewardTx: onRequestRewardTransaction, + rewardsProcessor: rewardProcessor, + rewardsProducer: rewardProducer, + accounts: accounts, + } + + rtp.chReceivedAllRewardTxs = make(chan bool) + rtp.rewardTxPool.RegisterHandler(rtp.receivedRewardTransaction) + rtp.rewardTxsForBlock.txHashAndInfo = make(map[string]*txInfo) + + return rtp, nil +} + +// waitForRewardTxHashes waits for a call whether all the requested smartContractResults appeared +func (rtp *rewardTxPreprocessor) waitForRewardTxHashes(waitTime time.Duration) error { + select { + case <-rtp.chReceivedAllRewardTxs: + return nil + case <-time.After(waitTime): + return process.ErrTimeIsOut + } +} + +// IsDataPrepared returns non error if all the requested reward transactions arrived and were saved into the pool +func (rtp *rewardTxPreprocessor) IsDataPrepared(requestedRewardTxs int, haveTime func() time.Duration) error { + if requestedRewardTxs > 0 { + log.Info(fmt.Sprintf("requested %d missing reward Txs\n", requestedRewardTxs)) + err := rtp.waitForRewardTxHashes(haveTime()) + rtp.rewardTxsForBlock.mutTxsForBlock.RLock() + missingRewardTxs := rtp.rewardTxsForBlock.missingTxs + rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() + log.Info(fmt.Sprintf("received %d missing reward Txs\n", requestedRewardTxs-missingRewardTxs)) + if err != nil { + return err + } + } + return nil +} + +// RemoveTxBlockFromPools removes reward transactions and miniblocks from associated pools +func (rtp *rewardTxPreprocessor) RemoveTxBlockFromPools(body block.Body, miniBlockPool storage.Cacher) error { + if body == nil { + return process.ErrNilTxBlockBody + } + + return rtp.removeDataFromPools(body, miniBlockPool, rtp.rewardTxPool, block.RewardsBlock) +} + +// RestoreTxBlockIntoPools restores the reward transactions and miniblocks to associated pools +func (rtp *rewardTxPreprocessor) RestoreTxBlockIntoPools( + body block.Body, + miniBlockPool storage.Cacher, +) (int, error) { + if miniBlockPool == nil { + return 0, process.ErrNilMiniBlockPool + } + + rewardTxsRestored := 0 + for i := 0; i < len(body); i++ { + miniBlock := body[i] + if miniBlock.Type != block.RewardsBlock { + continue + } + + strCache := process.ShardCacherIdentifier(miniBlock.SenderShardID, miniBlock.ReceiverShardID) + rewardTxBuff, err := rtp.storage.GetAll(dataRetriever.RewardTransactionUnit, miniBlock.TxHashes) + if err != nil { + return rewardTxsRestored, err + } + + for txHash, txBuff := range rewardTxBuff { + tx := rewardTx.RewardTx{} + err = rtp.marshalizer.Unmarshal(&tx, txBuff) + if err != nil 
{ + return rewardTxsRestored, err + } + + rtp.rewardTxPool.AddData([]byte(txHash), &tx, strCache) + } + + miniBlockHash, err := core.CalculateHash(rtp.marshalizer, rtp.hasher, miniBlock) + if err != nil { + return rewardTxsRestored, err + } + + miniBlockPool.Put(miniBlockHash, miniBlock) + + err = rtp.storage.GetStorer(dataRetriever.MiniBlockUnit).Remove(miniBlockHash) + if err != nil { + return rewardTxsRestored, err + } + rewardTxsRestored += len(miniBlock.TxHashes) + } + + return rewardTxsRestored, nil +} + +// ProcessBlockTransactions processes all the reward transactions from the block.Body, updates the state +func (rtp *rewardTxPreprocessor) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() bool) error { + rewardMiniBlocksSlice := make(block.MiniBlockSlice, 0) + computedRewardsMbsMap := rtp.rewardsProducer.CreateAllInterMiniBlocks() + for _, mb := range computedRewardsMbsMap { + rewardMiniBlocksSlice = append(rewardMiniBlocksSlice, mb) + } + rtp.AddComputedRewardMiniBlocks(rewardMiniBlocksSlice) + + // basic validation already done in interceptors + for i := 0; i < len(body); i++ { + miniBlock := body[i] + if miniBlock.Type != block.RewardsBlock { + continue + } + + for j := 0; j < len(miniBlock.TxHashes); j++ { + if !haveTime() { + return process.ErrTimeIsOut + } + + txHash := miniBlock.TxHashes[j] + rtp.rewardTxsForBlock.mutTxsForBlock.RLock() + txData := rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] + rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() + if txData == nil || txData.tx == nil { + return process.ErrMissingTransaction + } + + rTx, ok := txData.tx.(*rewardTx.RewardTx) + if !ok { + return process.ErrWrongTypeAssertion + } + + err := rtp.processRewardTransaction( + txHash, + rTx, + round, + miniBlock.SenderShardID, + miniBlock.ReceiverShardID, + ) + if err != nil { + return err + } + } + } + return nil +} + +// AddComputedRewardMiniBlocks adds to the local cache the reward transactions from the given miniblocks +func (rtp *rewardTxPreprocessor) AddComputedRewardMiniBlocks(computedRewardMiniblocks block.MiniBlockSlice) { + for _, rewardMb := range computedRewardMiniblocks { + txShardData := &txShardInfo{senderShardID: rewardMb.SenderShardID, receiverShardID: rewardMb.ReceiverShardID} + for _, txHash := range rewardMb.TxHashes { + tx, ok := rtp.rewardTxPool.SearchFirstData(txHash) + if !ok { + log.Error(process.ErrRewardTransactionNotFound.Error()) + continue + } + + rTx, ok := tx.(*rewardTx.RewardTx) + if !ok { + log.Error(process.ErrWrongTypeAssertion.Error()) + } + + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{ + tx: rTx, + txShardInfo: txShardData, + } + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + } + } +} + +// SaveTxBlockToStorage saves the reward transactions from body into storage +func (rtp *rewardTxPreprocessor) SaveTxBlockToStorage(body block.Body) error { + for i := 0; i < len(body); i++ { + miniBlock := (body)[i] + if miniBlock.Type != block.RewardsBlock { + continue + } + + err := rtp.saveTxsToStorage( + miniBlock.TxHashes, + &rtp.rewardTxsForBlock, + rtp.storage, + dataRetriever.RewardTransactionUnit, + ) + if err != nil { + return err + } + } + + return nil +} + +// receivedRewardTransaction is a callback function called when a new reward transaction +// is added in the reward transactions pool +func (rtp *rewardTxPreprocessor) receivedRewardTransaction(txHash []byte) { + receivedAllMissing := rtp.baseReceivedTransaction(txHash, &rtp.rewardTxsForBlock, 
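RestoreTxBlockIntoPools above re-hydrates the reward-transaction pool when a block is rolled back: for every RewardsBlock miniblock it reads the serialized transactions from storage, unmarshals them back into the pool, and removes the miniblock's own entry from the miniblock storer. A toy version of that restore loop is sketched below, with maps standing in for the storage service and the sharded pool and JSON standing in for the marshalizer.

package restoreexample

import (
	"encoding/json"
	"fmt"
)

// rewardTransaction is a stand-in for the reward transaction type.
type rewardTransaction struct {
	Round uint64 `json:"round"`
	Value uint64 `json:"value"`
}

// restoreMiniBlock is a stand-in miniblock carrying only what the loop needs.
type restoreMiniBlock struct {
	Type     string
	TxHashes []string
}

// restoreRewards puts serialized reward txs from storage back into the pool
// and deletes the corresponding miniblocks from the miniblock store.
func restoreRewards(
	body []restoreMiniBlock,
	txStorage map[string][]byte, // stand-in for the reward transaction storer
	mbStorage map[string][]byte, // stand-in for the miniblock storer
	pool map[string]*rewardTransaction, // stand-in for the sharded reward tx pool
) (int, error) {
	restored := 0
	for _, mb := range body {
		if mb.Type != "rewards" {
			continue
		}
		for _, hash := range mb.TxHashes {
			buff, ok := txStorage[hash]
			if !ok {
				return restored, fmt.Errorf("missing reward tx %s in storage", hash)
			}
			tx := &rewardTransaction{}
			if err := json.Unmarshal(buff, tx); err != nil {
				return restored, err
			}
			pool[hash] = tx
		}
		delete(mbStorage, mbHash(mb))
		restored += len(mb.TxHashes)
	}
	return restored, nil
}

// mbHash is a placeholder for marshalling and hashing the miniblock.
func mbHash(mb restoreMiniBlock) string {
	buff, _ := json.Marshal(mb)
	return string(buff)
}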
rtp.rewardTxPool) + + if receivedAllMissing { + rtp.chReceivedAllRewardTxs <- true + } +} + +// CreateBlockStarted cleans the local cache map for processed/created reward transactions at this round +func (rtp *rewardTxPreprocessor) CreateBlockStarted() { + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + rtp.rewardTxsForBlock.txHashAndInfo = make(map[string]*txInfo) + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() +} + +// RequestBlockTransactions request for reward transactions if missing from a block.Body +func (rtp *rewardTxPreprocessor) RequestBlockTransactions(body block.Body) int { + requestedRewardTxs := 0 + missingRewardTxsForShards := rtp.computeMissingAndExistingRewardTxsForShards(body) + + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + for senderShardID, rewardTxHashes := range missingRewardTxsForShards { + for _, txHash := range rewardTxHashes { + rtp.setMissingTxsForShard(senderShardID, txHash) + } + } + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + + for senderShardID, mbsRewardTxHashes := range missingRewardTxsForShards { + for _, mbRewardTxHashes := range mbsRewardTxHashes { + requestedRewardTxs += len(mbRewardTxHashes.txHashes) + rtp.onRequestRewardTx(senderShardID, mbRewardTxHashes.txHashes) + } + } + + return requestedRewardTxs +} + +func (rtp *rewardTxPreprocessor) setMissingTxsForShard(senderShardID uint32, mbTxHashes *txsHashesInfo) { + txShardData := &txShardInfo{senderShardID: senderShardID, receiverShardID: mbTxHashes.receiverShardID} + for _, txHash := range mbTxHashes.txHashes { + rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: nil, txShardInfo: txShardData} + } +} + +// computeMissingAndExistingRewardTxsForShards calculates what reward transactions are available and what are missing +// from block.Body +func (rtp *rewardTxPreprocessor) computeMissingAndExistingRewardTxsForShards(body block.Body) map[uint32][]*txsHashesInfo { + rewardTxs := block.Body{} + for _, mb := range body { + if mb.Type != block.RewardsBlock { + continue + } + if mb.SenderShardID == rtp.shardCoordinator.SelfId() { + continue + } + + rewardTxs = append(rewardTxs, mb) + } + + missingTxsForShards := rtp.computeExistingAndMissing( + rewardTxs, + &rtp.rewardTxsForBlock, + rtp.chReceivedAllRewardTxs, + block.RewardsBlock, + rtp.rewardTxPool, + ) + + return missingTxsForShards +} + +// processRewardTransaction processes a reward transaction, if the transactions has an error it removes it from pool +func (rtp *rewardTxPreprocessor) processRewardTransaction( + rewardTxHash []byte, + rewardTx *rewardTx.RewardTx, + round uint64, + sndShardId uint32, + dstShardId uint32, +) error { + + err := rtp.rewardsProcessor.ProcessRewardTransaction(rewardTx) + if err != nil { + return err + } + + txShardData := &txShardInfo{senderShardID: sndShardId, receiverShardID: dstShardId} + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + rtp.rewardTxsForBlock.txHashAndInfo[string(rewardTxHash)] = &txInfo{tx: rewardTx, txShardInfo: txShardData} + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + + return nil +} + +// RequestTransactionsForMiniBlock requests missing reward transactions for a certain miniblock +func (rtp *rewardTxPreprocessor) RequestTransactionsForMiniBlock(mb block.MiniBlock) int { + missingRewardTxsForMiniBlock := rtp.computeMissingRewardTxsForMiniBlock(mb) + rtp.onRequestRewardTx(mb.SenderShardID, missingRewardTxsForMiniBlock) + + return len(missingRewardTxsForMiniBlock) +} + +// computeMissingRewardTxsForMiniBlock computes missing reward transactions for a certain miniblock +func (rtp 
*rewardTxPreprocessor) computeMissingRewardTxsForMiniBlock(mb block.MiniBlock) [][]byte { + missingRewardTxs := make([][]byte, 0) + if mb.Type != block.RewardsBlock { + return missingRewardTxs + } + + for _, txHash := range mb.TxHashes { + tx, _ := process.GetTransactionHandlerFromPool( + mb.SenderShardID, + mb.ReceiverShardID, + txHash, + rtp.rewardTxPool, + ) + + if tx == nil { + missingRewardTxs = append(missingRewardTxs, txHash) + } + } + + return missingRewardTxs +} + +// getAllRewardTxsFromMiniBlock gets all the reward transactions from a miniblock into a new structure +func (rtp *rewardTxPreprocessor) getAllRewardTxsFromMiniBlock( + mb *block.MiniBlock, + haveTime func() bool, +) ([]*rewardTx.RewardTx, [][]byte, error) { + + strCache := process.ShardCacherIdentifier(mb.SenderShardID, mb.ReceiverShardID) + txCache := rtp.rewardTxPool.ShardDataStore(strCache) + if txCache == nil { + return nil, nil, process.ErrNilRewardTxDataPool + } + + // verify if all reward transactions exist + rewardTxs := make([]*rewardTx.RewardTx, 0) + txHashes := make([][]byte, 0) + for _, txHash := range mb.TxHashes { + if !haveTime() { + return nil, nil, process.ErrTimeIsOut + } + + tmp, ok := txCache.Peek(txHash) + if !ok { + return nil, nil, process.ErrNilRewardTransaction + } + + tx, ok := tmp.(*rewardTx.RewardTx) + if !ok { + return nil, nil, process.ErrWrongTypeAssertion + } + + txHashes = append(txHashes, txHash) + rewardTxs = append(rewardTxs, tx) + } + + return rewardTxs, txHashes, nil +} + +// CreateAndProcessMiniBlock does nothing for the reward transactions preprocessor, as reward miniblocks are created in CreateAndProcessMiniBlocks +func (rtp *rewardTxPreprocessor) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) { + return nil, nil +} + +// CreateAndProcessMiniBlocks creates the reward miniblocks computed by the rewards producer and processes +// the reward transactions added into them +func (rtp *rewardTxPreprocessor) CreateAndProcessMiniBlocks( + maxTxSpaceRemained uint32, + maxMbSpaceRemained uint32, + round uint64, + _ func() bool, +) (block.MiniBlockSlice, error) { + + // always have time for rewards + haveTime := func() bool { + return true + } + + rewardMiniBlocksSlice := make(block.MiniBlockSlice, 0) + computedRewardsMbsMap := rtp.rewardsProducer.CreateAllInterMiniBlocks() + for _, mb := range computedRewardsMbsMap { + rewardMiniBlocksSlice = append(rewardMiniBlocksSlice, mb) + } + + snapshot := rtp.accounts.JournalLen() + + for _, mb := range rewardMiniBlocksSlice { + err := rtp.ProcessMiniBlock(mb, haveTime, round) + + if err != nil { + log.Error(err.Error()) + errAccountState := rtp.accounts.RevertToSnapshot(snapshot) + if errAccountState != nil { + // TODO: evaluate if reloading the trie from disk might solve the problem + log.Error(errAccountState.Error()) + } + return nil, err + } + } + + return rewardMiniBlocksSlice, nil +} + +// ProcessMiniBlock processes all the reward transactions from a miniblock and saves the processed reward transactions +// in local cache +func (rtp *rewardTxPreprocessor) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error { + if miniBlock.Type != block.RewardsBlock { + return process.ErrWrongTypeInMiniBlock + } + + miniBlockRewardTxs, miniBlockTxHashes, err := rtp.getAllRewardTxsFromMiniBlock(miniBlock, haveTime) + if err != nil { + return err + } + + for index := range miniBlockRewardTxs { + if !haveTime() { + return process.ErrTimeIsOut + } + + 
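+ // each reward transaction is applied through the rewards processor; any error here aborts the processing of the whole miniblock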
err = rtp.rewardsProcessor.ProcessRewardTransaction(miniBlockRewardTxs[index]) + if err != nil { + return err + } + } + + txShardData := &txShardInfo{senderShardID: miniBlock.SenderShardID, receiverShardID: miniBlock.ReceiverShardID} + + rtp.rewardTxsForBlock.mutTxsForBlock.Lock() + for index, txHash := range miniBlockTxHashes { + rtp.rewardTxsForBlock.txHashAndInfo[string(txHash)] = &txInfo{tx: miniBlockRewardTxs[index], txShardInfo: txShardData} + } + rtp.rewardTxsForBlock.mutTxsForBlock.Unlock() + + return nil +} + +// CreateMarshalizedData marshalizes the reward transactions for the given hashes and saves them into a new structure +func (rtp *rewardTxPreprocessor) CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) { + marshaledRewardTxs, err := rtp.createMarshalizedData(txHashes, &rtp.rewardTxsForBlock) + if err != nil { + return nil, err + } + + return marshaledRewardTxs, nil +} + +// GetAllCurrentUsedTxs returns all the reward transactions used at current creation / processing +func (rtp *rewardTxPreprocessor) GetAllCurrentUsedTxs() map[string]data.TransactionHandler { + rewardTxPool := make(map[string]data.TransactionHandler) + + rtp.rewardTxsForBlock.mutTxsForBlock.RLock() + for txHash, txData := range rtp.rewardTxsForBlock.txHashAndInfo { + rewardTxPool[txHash] = txData.tx + } + rtp.rewardTxsForBlock.mutTxsForBlock.RUnlock() + + return rewardTxPool +} + +// IsInterfaceNil returns true if there is no value under the interface +func (rtp *rewardTxPreprocessor) IsInterfaceNil() bool { + if rtp == nil { + return true + } + return false +} diff --git a/process/block/preprocess/rewardTxPreProcessor_test.go b/process/block/preprocess/rewardTxPreProcessor_test.go new file mode 100644 index 00000000000..226b432cc05 --- /dev/null +++ b/process/block/preprocess/rewardTxPreProcessor_test.go @@ -0,0 +1,695 @@ +package preprocess + +import ( + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/storage" + "github.com/stretchr/testify/assert" +) + +func TestNewRewardTxPreprocessor_NilRewardTxDataPoolShouldErr(t *testing.T) { + t.Parallel() + + rtp, err := NewRewardTxPreprocessor( + nil, + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilRewardTxDataPool, err) +} + +func TestNewRewardTxPreprocessor_NilStoreShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.Transactions(), + nil, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilStorage, err) +} + +func TestNewRewardTxPreprocessor_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.Transactions(), + &mock.ChainStorerMock{}, + nil, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + 
func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilHasher, err) +} + +func TestNewRewardTxPreprocessor_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + nil, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewRewardTxPreprocessor_NilRewardTxProcessorShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + nil, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilRewardsTxProcessor, err) +} + +func TestNewRewardTxPreprocessor_NilRewardProducerShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + nil, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilInternalTransactionProducer, err) +} + +func TestNewRewardTxPreprocessor_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + nil, + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewRewardTxPreprocessor_NilAccountsAdapterShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + nil, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilAccountsAdapter, err) +} + +func TestNewRewardTxPreprocessor_NilRequestHandlerShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + nil, + ) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilRequestHandler, err) +} + +func TestNewRewardTxPreprocessor_OkValsShouldWork(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, err := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + 
&mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + assert.Nil(t, err) + assert.NotNil(t, rtp) +} + +func TestRewardTxPreprocessor_AddComputedRewardMiniBlocksShouldAddMiniBlock(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + + tdp := initDataPool() + + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + assert.NotNil(t, rtp) + + txHashes := [][]byte{[]byte(txHash)} + + var rewardMiniBlocks block.MiniBlockSlice + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: 0, + } + rewardMiniBlocks = append(rewardMiniBlocks, &mb1) + + rtp.AddComputedRewardMiniBlocks(rewardMiniBlocks) + + res := rtp.GetAllCurrentUsedTxs() + + if _, ok := res[txHash]; !ok { + assert.Fail(t, "miniblock was not added") + } +} + +func TestRewardTxPreprocessor_CreateMarshalizedDataShouldWork(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + txHashes := [][]byte{[]byte(txHash)} + var rewardMiniBlocks block.MiniBlockSlice + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + + rewardMiniBlocks = append(rewardMiniBlocks, &mb1) + rtp.AddComputedRewardMiniBlocks(rewardMiniBlocks) + + res, err := rtp.CreateMarshalizedData(txHashes) + assert.Nil(t, err) + assert.Equal(t, 1, len(res)) +} + +func TestRewardTxPreprocessor_ProcessMiniBlockInvalidMiniBlockTypeShouldErr(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + txHashes := [][]byte{[]byte(txHash)} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: 0, + } + + err := rtp.ProcessMiniBlock(&mb1, haveTimeTrue, 0) + assert.Equal(t, process.ErrWrongTypeInMiniBlock, err) +} + +func TestRewardTxPreprocessor_ProcessMiniBlockShouldWork(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + txHashes := [][]byte{[]byte(txHash)} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + + err := rtp.ProcessMiniBlock(&mb1, haveTimeTrue, 0) + assert.Nil(t, err) + + txsMap := rtp.GetAllCurrentUsedTxs() + if _, ok := txsMap[txHash]; !ok { + assert.Fail(t, "miniblock was not added") 
+ } +} + +func TestRewardTxPreprocessor_SaveTxBlockToStorageShouldWork(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + txHashes := [][]byte{[]byte(txHash)} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + mb2 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 0, + SenderShardID: 1, + Type: block.RewardsBlock, + } + + var rewardMiniBlocks block.MiniBlockSlice + rewardMiniBlocks = append(rewardMiniBlocks, &mb1) + + rtp.AddComputedRewardMiniBlocks(rewardMiniBlocks) + + var blockBody block.Body + blockBody = append(blockBody, &mb1, &mb2) + err := rtp.SaveTxBlockToStorage(blockBody) + + assert.Nil(t, err) +} + +func TestRewardTxPreprocessor_RequestBlockTransactionsNoMissingTxsShouldWork(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + txHashes := [][]byte{[]byte(txHash)} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + mb2 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 0, + SenderShardID: 1, + Type: block.RewardsBlock, + } + + var rewardMiniBlocks block.MiniBlockSlice + rewardMiniBlocks = append(rewardMiniBlocks, &mb1) + + rtp.AddComputedRewardMiniBlocks(rewardMiniBlocks) + + var blockBody block.Body + blockBody = append(blockBody, &mb1, &mb2) + + _ = rtp.SaveTxBlockToStorage(blockBody) + + res := rtp.RequestBlockTransactions(blockBody) + assert.Equal(t, 0, res) +} + +func TestRewardTxPreprocessor_RequestTransactionsForMiniBlockShouldWork(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + txHashes := [][]byte{[]byte(txHash)} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + + res := rtp.RequestTransactionsForMiniBlock(mb1) + assert.Equal(t, 0, res) +} + +func TestRewardTxPreprocessor_ProcessBlockTransactions(t *testing.T) { + t.Parallel() + + txHash := "tx1_hash" + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + txHashes := [][]byte{[]byte(txHash)} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: 
block.RewardsBlock, + } + mb2 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 0, + SenderShardID: 1, + Type: block.RewardsBlock, + } + + var rewardMiniBlocks block.MiniBlockSlice + rewardMiniBlocks = append(rewardMiniBlocks, &mb1) + + rtp.AddComputedRewardMiniBlocks(rewardMiniBlocks) + + var blockBody block.Body + blockBody = append(blockBody, &mb1, &mb2) + + err := rtp.ProcessBlockTransactions(blockBody, 0, haveTimeTrue) + assert.Nil(t, err) +} + +func TestRewardTxPreprocessor_IsDataPreparedShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + err := rtp.IsDataPrepared(1, haveTime) + + assert.Equal(t, process.ErrTimeIsOut, err) +} + +func TestRewardTxPreprocessor_IsDataPrepared(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + go func() { + time.Sleep(50 * time.Millisecond) + rtp.chReceivedAllRewardTxs <- true + }() + + err := rtp.IsDataPrepared(1, haveTime) + + assert.Nil(t, err) +} + +func TestRewardTxPreprocessor_RestoreTxBlockIntoPools(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + storer := mock.ChainStorerMock{ + GetAllCalled: func(unitType dataRetriever.UnitType, keys [][]byte) (map[string][]byte, error) { + retMap := map[string][]byte{ + "tx_hash1": []byte(`{"Round": 0}`), + } + + return retMap, nil + }, + GetStorerCalled: func(unitType dataRetriever.UnitType) storage.Storer { + return &mock.StorerStub{ + RemoveCalled: func(key []byte) error { + return nil + }, + } + }, + } + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &storer, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + txHashes := [][]byte{[]byte("tx_hash1")} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + + var blockBody block.Body + blockBody = append(blockBody, &mb1) + miniBlockPool := mock.NewCacherMock() + + numRestoredTxs, err := rtp.RestoreTxBlockIntoPools(blockBody, miniBlockPool) + assert.Equal(t, 1, numRestoredTxs) + assert.Nil(t, err) +} + +func TestRewardTxPreprocessor_CreateAndProcessMiniBlocksTxForMiniBlockNotFoundShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{ + CreateAllInterMiniBlocksCalled: func() map[uint32]*block.MiniBlock { + txHashes := [][]byte{[]byte("hash_unavailable")} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + + return map[uint32]*block.MiniBlock{ + 0: &mb1, + } + }, + }, + 
mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + mBlocksSlice, err := rtp.CreateAndProcessMiniBlocks(1, 1, 0, haveTimeTrue) + assert.Nil(t, mBlocksSlice) + assert.Equal(t, process.ErrNilRewardTransaction, err) +} + +func TestRewardTxPreprocessor_CreateAndProcessMiniBlocksShouldWork(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{ + CreateAllInterMiniBlocksCalled: func() map[uint32]*block.MiniBlock { + txHashes := [][]byte{[]byte("tx1_hash")} + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + + return map[uint32]*block.MiniBlock{ + 0: &mb1, + } + }, + }, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + mBlocksSlice, err := rtp.CreateAndProcessMiniBlocks(1, 1, 0, haveTimeTrue) + assert.NotNil(t, mBlocksSlice) + assert.Nil(t, err) +} + +func TestRewardTxPreprocessor_CreateBlockStartedShouldCleanMap(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &mock.ChainStorerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + func(shardID uint32, txHashes [][]byte) {}, + ) + + rtp.CreateBlockStarted() + assert.Equal(t, 0, len(rtp.rewardTxsForBlock.txHashAndInfo)) +} diff --git a/process/block/preprocess/rewardsHandler.go b/process/block/preprocess/rewardsHandler.go new file mode 100644 index 00000000000..4f2bd7f46d6 --- /dev/null +++ b/process/block/preprocess/rewardsHandler.go @@ -0,0 +1,472 @@ +package preprocess + +import ( + "math/big" + "sync" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type rewardsHandler struct { + address process.SpecialAddressHandler + hasher hashing.Hasher + marshalizer marshal.Marshalizer + shardCoordinator sharding.Coordinator + adrConv state.AddressConverter + store dataRetriever.StorageService + rewardTxPool dataRetriever.ShardedDataCacherNotifier + + mutGenRewardTxs sync.RWMutex + protocolRewards []data.TransactionHandler + protocolRewardsMeta []data.TransactionHandler + feeRewards []data.TransactionHandler + + mut sync.Mutex + accumulatedFees *big.Int + rewardTxsForBlock map[string]*rewardTx.RewardTx + economicsRewards process.RewardsHandler + rewardValue *big.Int +} + +// NewRewardTxHandler constructor for the reward transaction handler +func NewRewardTxHandler( + address process.SpecialAddressHandler, + hasher hashing.Hasher, + marshalizer marshal.Marshalizer, + shardCoordinator sharding.Coordinator, + adrConv state.AddressConverter, + store dataRetriever.StorageService, + rewardTxPool dataRetriever.ShardedDataCacherNotifier, + economicsRewards process.RewardsHandler, +) 
(*rewardsHandler, error) { + if address == nil || address.IsInterfaceNil() { + return nil, process.ErrNilSpecialAddressHandler + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + if adrConv == nil || adrConv.IsInterfaceNil() { + return nil, process.ErrNilAddressConverter + } + if store == nil || store.IsInterfaceNil() { + return nil, process.ErrNilStorage + } + if rewardTxPool == nil || rewardTxPool.IsInterfaceNil() { + return nil, process.ErrNilRewardTxDataPool + } + if economicsRewards == nil || economicsRewards.IsInterfaceNil() { + return nil, process.ErrNilEconomicsRewardsHandler + } + + rewardValue := economicsRewards.RewardsValue() + + rtxh := &rewardsHandler{ + address: address, + shardCoordinator: shardCoordinator, + adrConv: adrConv, + hasher: hasher, + marshalizer: marshalizer, + store: store, + rewardTxPool: rewardTxPool, + economicsRewards: economicsRewards, + rewardValue: rewardValue, + } + + rtxh.accumulatedFees = big.NewInt(0) + rtxh.rewardTxsForBlock = make(map[string]*rewardTx.RewardTx) + + return rtxh, nil +} + +// SaveCurrentIntermediateTxToStorage saves current cached data into storage - already saved for txs +func (rtxh *rewardsHandler) SaveCurrentIntermediateTxToStorage() error { + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + for _, rTx := range rtxh.rewardTxsForBlock { + buff, err := rtxh.marshalizer.Marshal(rTx) + if err != nil { + return err + } + + errNotCritical := rtxh.store.Put(dataRetriever.RewardTransactionUnit, rtxh.hasher.Compute(string(buff)), buff) + if errNotCritical != nil { + log.Error(errNotCritical.Error()) + } + } + + return nil +} + +// AddIntermediateTransactions adds intermediate transactions to local cache +func (rtxh *rewardsHandler) AddIntermediateTransactions(txs []data.TransactionHandler) error { + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + for i := 0; i < len(txs); i++ { + addedRewardTx, ok := txs[i].(*rewardTx.RewardTx) + if !ok { + return process.ErrWrongTypeAssertion + } + + if addedRewardTx.ShardId != rtxh.shardCoordinator.SelfId() { + continue + } + + rewardTxHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, txs[i]) + if err != nil { + return err + } + + rtxh.rewardTxsForBlock[string(rewardTxHash)] = addedRewardTx + } + + return nil +} + +// CreateAllInterMiniBlocks creates miniblocks from process transactions +func (rtxh *rewardsHandler) CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock { + rtxh.mutGenRewardTxs.Lock() + + rtxh.feeRewards = rtxh.createRewardFromFees() + rtxh.addTransactionsToPool(rtxh.feeRewards) + + rtxh.protocolRewards = rtxh.createProtocolRewards() + rtxh.addTransactionsToPool(rtxh.protocolRewards) + + rtxh.protocolRewardsMeta = rtxh.createProtocolRewardsForMeta() + rtxh.addTransactionsToPool(rtxh.protocolRewardsMeta) + + calculatedRewardTxs := make([]data.TransactionHandler, 0) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewards...) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewardsMeta...) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.feeRewards...) 
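+ // at this point calculatedRewardTxs holds the shard protocol rewards, the metachain protocol rewards and the fee rewards, which are grouped below into per-destination-shard miniblocks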
+ + rtxh.mutGenRewardTxs.Unlock() + + miniBlocks := rtxh.miniblocksFromRewardTxs(calculatedRewardTxs) + + return miniBlocks +} + +func (rtxh *rewardsHandler) addTransactionsToPool(rewardTxs []data.TransactionHandler) { + for _, rTx := range rewardTxs { + dstShId, err := rtxh.address.ShardIdForAddress(rTx.GetRecvAddress()) + if err != nil { + log.Debug(err.Error()) + } + + txHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, rTx) + if err != nil { + log.Debug(err.Error()) + } + + // add the reward transaction to the pool so that the processor can find it + cacheId := process.ShardCacherIdentifier(rtxh.shardCoordinator.SelfId(), dstShId) + rtxh.rewardTxPool.AddData(txHash, rTx, cacheId) + } +} + +func (rtxh *rewardsHandler) miniblocksFromRewardTxs( + rewardTxs []data.TransactionHandler, +) map[uint32]*block.MiniBlock { + miniBlocks := make(map[uint32]*block.MiniBlock, 0) + + for _, rTx := range rewardTxs { + dstShId, err := rtxh.address.ShardIdForAddress(rTx.GetRecvAddress()) + if err != nil { + log.Debug(err.Error()) + continue + } + + txHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, rTx) + if err != nil { + log.Debug(err.Error()) + continue + } + + var ok bool + var mb *block.MiniBlock + if mb, ok = miniBlocks[dstShId]; !ok { + mb = &block.MiniBlock{ + ReceiverShardID: dstShId, + SenderShardID: rtxh.shardCoordinator.SelfId(), + Type: block.RewardsBlock, + } + } + + mb.TxHashes = append(mb.TxHashes, txHash) + miniBlocks[dstShId] = mb + } + + return miniBlocks +} + +// VerifyInterMiniBlocks verifies if transaction fees were correctly handled for the block +func (rtxh *rewardsHandler) VerifyInterMiniBlocks(body block.Body) error { + err := rtxh.verifyCreatedRewardsTxs() + return err +} + +// CreateBlockStarted does the cleanup before creating a new block +func (rtxh *rewardsHandler) CreateBlockStarted() { + rtxh.cleanCachedData() +} + +// CreateMarshalizedData creates the marshalized data for broadcasting purposes +func (rtxh *rewardsHandler) CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) { + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + marshaledTxs := make([][]byte, 0) + for _, txHash := range txHashes { + rTx, ok := rtxh.rewardTxsForBlock[string(txHash)] + if !ok { + return nil, process.ErrRewardTxNotFound + } + + marshaledTx, err := rtxh.marshalizer.Marshal(rTx) + if err != nil { + return nil, process.ErrMarshalWithoutSuccess + } + marshaledTxs = append(marshaledTxs, marshaledTx) + } + + return marshaledTxs, nil +} + +// ProcessTransactionFee adds the tx cost to the accumulated amount +func (rtxh *rewardsHandler) ProcessTransactionFee(cost *big.Int) { + if cost == nil { + log.Debug(process.ErrNilValue.Error()) + return + } + + rtxh.mut.Lock() + rtxh.accumulatedFees = rtxh.accumulatedFees.Add(rtxh.accumulatedFees, cost) + rtxh.mut.Unlock() +} + +// cleanCachedData deletes the cached data +func (rtxh *rewardsHandler) cleanCachedData() { + rtxh.mut.Lock() + rtxh.accumulatedFees = big.NewInt(0) + rtxh.rewardTxsForBlock = make(map[string]*rewardTx.RewardTx) + rtxh.mut.Unlock() + + rtxh.mutGenRewardTxs.Lock() + rtxh.feeRewards = make([]data.TransactionHandler, 0) + rtxh.protocolRewards = make([]data.TransactionHandler, 0) + rtxh.protocolRewardsMeta = make([]data.TransactionHandler, 0) + rtxh.mutGenRewardTxs.Unlock() +} + +func getPercentageOfValue(value *big.Int, percentage float64) *big.Int { + x := new(big.Float).SetInt(value) + y := big.NewFloat(percentage) + + z := new(big.Float).Mul(x, y) + + op := big.NewInt(0) + result, _ := z.Int(op) + + 
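+ // big.Float.Int truncates toward zero, so e.g. 10% of an accumulated fee of 50 yields exactly 5, while fractional results are rounded down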
return result +} + +func (rtxh *rewardsHandler) createLeaderTx() *rewardTx.RewardTx { + currTx := &rewardTx.RewardTx{} + + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, rtxh.economicsRewards.LeaderPercentage()) + currTx.RcvAddr = rtxh.address.LeaderAddress() + currTx.ShardId = rtxh.shardCoordinator.SelfId() + currTx.Epoch = rtxh.address.Epoch() + currTx.Round = rtxh.address.Round() + + return currTx +} + +func (rtxh *rewardsHandler) createBurnTx() *rewardTx.RewardTx { + currTx := &rewardTx.RewardTx{} + + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, rtxh.economicsRewards.BurnPercentage()) + currTx.RcvAddr = rtxh.address.BurnAddress() + currTx.ShardId = rtxh.shardCoordinator.SelfId() + currTx.Epoch = rtxh.address.Epoch() + currTx.Round = rtxh.address.Round() + + return currTx +} + +func (rtxh *rewardsHandler) createCommunityTx() *rewardTx.RewardTx { + currTx := &rewardTx.RewardTx{} + + currTx.Value = getPercentageOfValue(rtxh.accumulatedFees, rtxh.economicsRewards.CommunityPercentage()) + currTx.RcvAddr = rtxh.address.ElrondCommunityAddress() + currTx.ShardId = rtxh.shardCoordinator.SelfId() + currTx.Epoch = rtxh.address.Epoch() + currTx.Round = rtxh.address.Round() + + return currTx +} + +// createRewardFromFees creates the reward transactions from accumulated fees +// According to economic paper, out of the block fees 40% are burned, 50% go to the +// leader and 10% go to Elrond community fund. +func (rtxh *rewardsHandler) createRewardFromFees() []data.TransactionHandler { + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + if rtxh.accumulatedFees.Cmp(big.NewInt(1)) < 0 { + rtxh.accumulatedFees = big.NewInt(0) + return nil + } + + leaderTx := rtxh.createLeaderTx() + communityTx := rtxh.createCommunityTx() + burnTx := rtxh.createBurnTx() + + currFeeTxs := make([]data.TransactionHandler, 0) + currFeeTxs = append(currFeeTxs, leaderTx, communityTx, burnTx) + + return currFeeTxs +} + +// createProtocolRewards creates the protocol reward transactions +func (rtxh *rewardsHandler) createProtocolRewards() []data.TransactionHandler { + consensusRewardData := rtxh.address.ConsensusShardRewardData() + + consensusRewardTxs := make([]data.TransactionHandler, 0) + for _, address := range consensusRewardData.Addresses { + rTx := &rewardTx.RewardTx{} + rTx.Value = rtxh.rewardValue + rTx.RcvAddr = []byte(address) + rTx.ShardId = rtxh.shardCoordinator.SelfId() + rTx.Epoch = consensusRewardData.Epoch + rTx.Round = consensusRewardData.Round + + consensusRewardTxs = append(consensusRewardTxs, rTx) + } + + return consensusRewardTxs +} + +// createProtocolRewardsForMeta creates the protocol reward transactions +func (rtxh *rewardsHandler) createProtocolRewardsForMeta() []data.TransactionHandler { + metaRewardsData := rtxh.address.ConsensusMetaRewardData() + consensusRewardTxs := make([]data.TransactionHandler, 0) + + for _, metaConsensusSet := range metaRewardsData { + for _, address := range metaConsensusSet.Addresses { + shardId, err := rtxh.address.ShardIdForAddress([]byte(address)) + if err != nil { + log.Error(err.Error()) + continue + } + + if shardId != rtxh.shardCoordinator.SelfId() { + continue + } + + rTx := &rewardTx.RewardTx{} + rTx.Value = rtxh.rewardValue + rTx.RcvAddr = []byte(address) + rTx.ShardId = rtxh.shardCoordinator.SelfId() + rTx.Epoch = metaConsensusSet.Epoch + rTx.Round = metaConsensusSet.Round + + consensusRewardTxs = append(consensusRewardTxs, rTx) + } + } + + return consensusRewardTxs +} + +// verifyCreatedRewardsTxs verifies if the calculated rewards transactions 
and the block reward transactions are the same +func (rtxh *rewardsHandler) verifyCreatedRewardsTxs() error { + calculatedRewardTxs := make([]data.TransactionHandler, 0) + rtxh.mutGenRewardTxs.RLock() + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewards...) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.protocolRewardsMeta...) + calculatedRewardTxs = append(calculatedRewardTxs, rtxh.feeRewards...) + rtxh.mutGenRewardTxs.RUnlock() + + rtxh.mut.Lock() + defer rtxh.mut.Unlock() + + totalFeesFromBlock := big.NewInt(0) + for _, rTx := range rtxh.rewardTxsForBlock { + totalFeesFromBlock = totalFeesFromBlock.Add(totalFeesFromBlock, rTx.GetValue()) + } + + if len(calculatedRewardTxs) != len(rtxh.rewardTxsForBlock) { + return process.ErrRewardTxsMismatchCreatedReceived + } + + totalCalculatedFees := big.NewInt(0) + for _, value := range calculatedRewardTxs { + totalCalculatedFees = totalCalculatedFees.Add(totalCalculatedFees, value.GetValue()) + + rewardTxHash, err := core.CalculateHash(rtxh.marshalizer, rtxh.hasher, value) + if err != nil { + return err + } + + txFromBlock, ok := rtxh.rewardTxsForBlock[string(rewardTxHash)] + if !ok { + return process.ErrRewardTxNotFound + } + if txFromBlock.GetValue().Cmp(value.GetValue()) != 0 { + return process.ErrRewardTxsDoNotMatch + } + } + + return nil +} + +// GetAllCurrentFinishedTxs returns the cached finalized transactions for current round +func (rtxh *rewardsHandler) GetAllCurrentFinishedTxs() map[string]data.TransactionHandler { + rtxh.mut.Lock() + + rewardTxPool := make(map[string]data.TransactionHandler) + for txHash, info := range rtxh.rewardTxsForBlock { + + senderShard := info.ShardId + receiverShard, err := rtxh.address.ShardIdForAddress(info.RcvAddr) + if err != nil { + continue + } + if receiverShard != rtxh.shardCoordinator.SelfId() { + continue + } + if senderShard != rtxh.shardCoordinator.SelfId() { + continue + } + rewardTxPool[txHash] = info + } + rtxh.mut.Unlock() + + return rewardTxPool +} + +// IsInterfaceNil returns true if there is no value under the interface +func (rtxh *rewardsHandler) IsInterfaceNil() bool { + if rtxh == nil { + return true + } + return false +} diff --git a/process/block/preprocess/rewardsHandler_test.go b/process/block/preprocess/rewardsHandler_test.go new file mode 100644 index 00000000000..60d7292af5b --- /dev/null +++ b/process/block/preprocess/rewardsHandler_test.go @@ -0,0 +1,613 @@ +package preprocess + +import ( + "bytes" + "math/big" + "reflect" + "testing" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" +) + +func RewandsHandlerMock() *mock.RewardsHandlerMock { + return &mock.RewardsHandlerMock{ + RewardsValueCalled: func() *big.Int { + return big.NewInt(1000) + }, + CommunityPercentageCalled: func() float64 { + return 0.10 + }, + LeaderPercentageCalled: func() float64 { + return 0.50 + }, + BurnPercentageCalled: func() float64 { + return 0.40 + }, + } +} + +func TestNewRewardTxHandler_NilSpecialAddressShouldErr(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + nil, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + 
RewandsHandlerMock(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilSpecialAddressHandler, err) +} + +func TestNewRewardTxHandler_NilHasher(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + nil, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + RewandsHandlerMock(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilHasher, err) +} + +func TestNewRewardTxHandler_NilMarshalizer(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + nil, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + RewandsHandlerMock(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewRewardTxHandler_NilShardCoordinator(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + nil, + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + RewandsHandlerMock(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewRewardTxHandler_NilAddressConverter(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + nil, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + RewandsHandlerMock(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilAddressConverter, err) +} + +func TestNewRewardTxHandler_NilChainStorer(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + nil, + tdp.RewardTransactions(), + RewandsHandlerMock(), + ) + + assert.Nil(t, th) + assert.Equal(t, process.ErrNilStorage, err) +} + +func TestNewRewardTxHandler_NilRewardsPool(t *testing.T) { + t.Parallel() + + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + nil, + RewandsHandlerMock(), + ) + + assert.Nil(t, th) + assert.NotNil(t, process.ErrNilRewardTxDataPool, err) +} + +func TestNewRewardTxHandler_ValsOk(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + RewandsHandlerMock(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) +} + +func TestRewardsHandler_AddIntermediateTransactions(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + RewandsHandlerMock(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + err = 
th.AddIntermediateTransactions(nil) + assert.Nil(t, err) +} + +func TestRewardsHandler_ProcessTransactionFee(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + RewandsHandlerMock(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + th.ProcessTransactionFee(nil) + assert.Equal(t, big.NewInt(0), th.accumulatedFees) + + th.ProcessTransactionFee(big.NewInt(10)) + assert.Equal(t, big.NewInt(10), th.accumulatedFees) + + th.ProcessTransactionFee(big.NewInt(100)) + assert.Equal(t, big.NewInt(110), th.accumulatedFees) +} + +func TestRewardsHandler_cleanCachedData(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + RewandsHandlerMock(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + th.ProcessTransactionFee(big.NewInt(10)) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{}}) + assert.Equal(t, big.NewInt(10), th.accumulatedFees) + assert.Equal(t, 1, len(th.rewardTxsForBlock)) + + th.cleanCachedData() + assert.Equal(t, big.NewInt(0), th.accumulatedFees) + assert.Equal(t, 0, len(th.rewardTxsForBlock)) +} + +func TestRewardsHandler_CreateRewardsFromFees(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, err := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + RewandsHandlerMock(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + txs := th.createRewardFromFees() + assert.Equal(t, 0, len(txs)) + + currTxFee := big.NewInt(50) + th.ProcessTransactionFee(currTxFee) + + txs = th.createRewardFromFees() + assert.Equal(t, 3, len(txs)) + + totalSum := txs[0].GetValue().Uint64() + totalSum += txs[1].GetValue().Uint64() + totalSum += txs[2].GetValue().Uint64() + + assert.Equal(t, currTxFee.Uint64(), totalSum) +} + +func TestRewardsHandler_VerifyCreatedRewardsTxsRewardTxNotFound(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + adrConv := &mock.AddressConverterMock{} + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + nodesCoordinator := mock.NewNodesCoordinatorMock() + addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator, nodesCoordinator) + th, err := NewRewardTxHandler( + addr, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + shardCoordinator, + adrConv, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + RewandsHandlerMock(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + err = th.verifyCreatedRewardsTxs() + assert.Nil(t, err) + + currTxFee := big.NewInt(50) + th.ProcessTransactionFee(currTxFee) + _ = th.CreateAllInterMiniBlocks() + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.LeaderAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: 
addr.BurnAddress()}}) + err = th.verifyCreatedRewardsTxs() + assert.Equal(t, process.ErrRewardTxNotFound, err) +} + +func TestRewardsHandler_VerifyCreatedRewardsTxsTotalTxsFeesDoNotMatch(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + adrConv := &mock.AddressConverterMock{} + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + nodesCoordinator := mock.NewNodesCoordinatorMock() + addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator, nodesCoordinator) + th, err := NewRewardTxHandler( + addr, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + shardCoordinator, + adrConv, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + RewandsHandlerMock(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + err = th.verifyCreatedRewardsTxs() + assert.Nil(t, err) + + currTxFee := big.NewInt(50) + th.ProcessTransactionFee(currTxFee) + extraVal := big.NewInt(100) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(25), RcvAddr: addr.LeaderAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(20), RcvAddr: addr.BurnAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: extraVal, RcvAddr: addr.BurnAddress()}}) + _ = th.CreateAllInterMiniBlocks() + err = th.verifyCreatedRewardsTxs() + assert.Equal(t, process.ErrRewardTxsMismatchCreatedReceived, err) +} + +func TestRewardsHandler_VerifyCreatedRewardsTxsOK(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + adrConv := &mock.AddressConverterMock{} + shardCoordinator := mock.NewMultiShardsCoordinatorMock(3) + nodesCoordinator := mock.NewNodesCoordinatorMock() + addr := mock.NewSpecialAddressHandlerMock(adrConv, shardCoordinator, nodesCoordinator) + th, err := NewRewardTxHandler( + addr, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + shardCoordinator, + adrConv, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + RewandsHandlerMock(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + currTxFee := big.NewInt(50) + th.ProcessTransactionFee(currTxFee) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(5), RcvAddr: addr.ElrondCommunityAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(25), RcvAddr: addr.LeaderAddress()}}) + _ = th.AddIntermediateTransactions([]data.TransactionHandler{&rewardTx.RewardTx{Value: big.NewInt(20), RcvAddr: addr.BurnAddress()}}) + _ = th.CreateAllInterMiniBlocks() + err = th.verifyCreatedRewardsTxs() + assert.Nil(t, err) +} + +func TestRewardsHandler_CreateAllInterMiniBlocksOK(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) + nodesCoordinator := mock.NewNodesCoordinatorMock() + tdp := initDataPool() + th, err := NewRewardTxHandler( + mock.NewSpecialAddressHandlerMock( + &mock.AddressConverterMock{}, + shardCoordinator, + nodesCoordinator, + ), + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + shardCoordinator, + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + RewandsHandlerMock(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + mbs := th.CreateAllInterMiniBlocks() + assert.Equal(t, 0, len(mbs)) + + currTxFee := big.NewInt(50) + th.ProcessTransactionFee(currTxFee) + + mbs = 
th.CreateAllInterMiniBlocks() + assert.Equal(t, 1, len(mbs)) +} + +func TestRewardsHandler_GetAllCurrentFinishedTxs(t *testing.T) { + t.Parallel() + + nodesCoordinator := mock.NewNodesCoordinatorMock() + shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) + tdp := initDataPool() + specialAddress := &mock.SpecialAddressHandlerMock{ + AdrConv: &mock.AddressConverterMock{}, + ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, + } + + _ = specialAddress.SetShardConsensusData([]byte("random"), 0, 0, shardCoordinator.SelfId()) + rewardData := specialAddress.ConsensusShardRewardData() + + th, err := NewRewardTxHandler( + specialAddress, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + shardCoordinator, + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + RewandsHandlerMock(), + ) + + assert.Nil(t, err) + assert.NotNil(t, th) + + txs := make([]data.TransactionHandler, len(rewardData.Addresses)) + for i := 0; i < len(rewardData.Addresses); i++ { + txs[i] = &rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: big.NewInt(1), + RcvAddr: []byte(rewardData.Addresses[i]), + ShardId: 0, + } + + } + + err = th.AddIntermediateTransactions(txs) + assert.Nil(t, err) + + finishedTxs := th.GetAllCurrentFinishedTxs() + assert.Equal(t, len(txs), len(finishedTxs)) + + for _, ftx := range finishedTxs { + found := false + for _, tx := range txs { + if reflect.DeepEqual(tx, ftx) { + found = true + break + } + } + + assert.True(t, found) + } +} + +func TestRewardsHandler_CreateMarshalizedDataShouldWork(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, _ := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + RewandsHandlerMock(), + ) + + txs := []data.TransactionHandler{ + &rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: big.NewInt(1), + RcvAddr: []byte("rcvr1"), + ShardId: 0, + }, + &rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: big.NewInt(1), + RcvAddr: []byte("rcvr2"), + ShardId: 0, + }, + } + + err := th.AddIntermediateTransactions(txs) + assert.Nil(t, err) + + var expectedMarshalizedTxs [][]byte + marshTx1, _ := th.marshalizer.Marshal(txs[0]) + marshTx2, _ := th.marshalizer.Marshal(txs[1]) + expectedMarshalizedTxs = append(expectedMarshalizedTxs, marshTx1, marshTx2) + + var txsHashes [][]byte + tx1Hash, _ := core.CalculateHash(th.marshalizer, th.hasher, txs[0]) + tx2Hash, _ := core.CalculateHash(th.marshalizer, th.hasher, txs[1]) + txsHashes = append(txsHashes, tx1Hash, tx2Hash) + + res, err := th.CreateMarshalizedData(txsHashes) + assert.Nil(t, err) + assert.Equal(t, len(txs), len(res)) + assert.True(t, bytes.Equal(expectedMarshalizedTxs[0], res[0])) + assert.True(t, bytes.Equal(expectedMarshalizedTxs[1], res[1])) +} + +func TestRewardsHandler_CreateBlockStartedShouldCreateProtocolReward(t *testing.T) { + t.Parallel() + + tdp := initDataPool() + th, _ := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{}, + tdp.RewardTransactions(), + RewandsHandlerMock(), + ) + + assert.Nil(t, th.protocolRewards) + + th.CreateBlockStarted() + assert.NotNil(t, th.protocolRewards) +} + +func TestRewardsHandler_SaveCurrentIntermediateTxToStorageShouldWork(t *testing.T) { + t.Parallel() + + putWasCalled := false 
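+ // putWasCalled is set by the mocked storer below, proving that SaveCurrentIntermediateTxToStorage actually persists the added reward txs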
+ tdp := initDataPool() + th, _ := NewRewardTxHandler( + &mock.SpecialAddressHandlerMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AddressConverterMock{}, + &mock.ChainStorerMock{ + PutCalled: func(unitType dataRetriever.UnitType, key []byte, value []byte) error { + putWasCalled = true + return nil + }, + }, + tdp.RewardTransactions(), + RewandsHandlerMock(), + ) + + txs := []data.TransactionHandler{ + &rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: big.NewInt(1), + RcvAddr: []byte("rcvr1"), + ShardId: 0, + }, + &rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: big.NewInt(1), + RcvAddr: []byte("rcvr2"), + ShardId: 0, + }, + } + + err := th.AddIntermediateTransactions(txs) + assert.Nil(t, err) + + err = th.SaveCurrentIntermediateTxToStorage() + assert.Nil(t, err) + assert.True(t, putWasCalled) +} diff --git a/process/block/preprocess/smartContractResults.go b/process/block/preprocess/smartContractResults.go index 4b1b20ca62f..469f04fe575 100644 --- a/process/block/preprocess/smartContractResults.go +++ b/process/block/preprocess/smartContractResults.go @@ -130,13 +130,11 @@ func (scr *smartContractResults) RemoveTxBlockFromPools(body block.Body, miniBlo func (scr *smartContractResults) RestoreTxBlockIntoPools( body block.Body, miniBlockPool storage.Cacher, -) (int, map[int][]byte, error) { +) (int, error) { if miniBlockPool == nil || miniBlockPool.IsInterfaceNil() { - return 0, nil, process.ErrNilMiniBlockPool + return 0, process.ErrNilMiniBlockPool } - miniBlockHashes := make(map[int][]byte) - scrRestored := 0 for i := 0; i < len(body); i++ { miniBlock := body[i] @@ -147,45 +145,44 @@ func (scr *smartContractResults) RestoreTxBlockIntoPools( strCache := process.ShardCacherIdentifier(miniBlock.SenderShardID, miniBlock.ReceiverShardID) scrBuff, err := scr.storage.GetAll(dataRetriever.UnsignedTransactionUnit, miniBlock.TxHashes) if err != nil { - return scrRestored, miniBlockHashes, err + return scrRestored, err } for txHash, txBuff := range scrBuff { tx := smartContractResult.SmartContractResult{} err = scr.marshalizer.Unmarshal(&tx, txBuff) if err != nil { - return scrRestored, miniBlockHashes, err + return scrRestored, err } scr.scrPool.AddData([]byte(txHash), &tx, strCache) err = scr.storage.GetStorer(dataRetriever.UnsignedTransactionUnit).Remove([]byte(txHash)) if err != nil { - return scrRestored, miniBlockHashes, err + return scrRestored, err } } miniBlockHash, err := core.CalculateHash(scr.marshalizer, scr.hasher, miniBlock) if err != nil { - return scrRestored, miniBlockHashes, err + return scrRestored, err } - restoredHash := scr.restoreMiniBlock(miniBlock, miniBlockHash, miniBlockPool) + miniBlockPool.Put(miniBlockHash, miniBlock) err = scr.storage.GetStorer(dataRetriever.MiniBlockUnit).Remove(miniBlockHash) if err != nil { - return scrRestored, miniBlockHashes, err + return scrRestored, err } - miniBlockHashes[i] = restoredHash scrRestored += len(miniBlock.TxHashes) } - return scrRestored, miniBlockHashes, nil + return scrRestored, nil } // ProcessBlockTransactions processes all the smartContractResult from the block.Body, updates the state -func (scr *smartContractResults) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error { +func (scr *smartContractResults) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() bool) error { // basic validation already done in interceptors for i := 0; i < len(body); i++ { miniBlock := body[i] @@ -201,7 +198,7 @@ func (scr 
*smartContractResults) ProcessBlockTransactions(body block.Body, round } for j := 0; j < len(miniBlock.TxHashes); j++ { - if haveTime() < 0 { + if !haveTime() { return process.ErrTimeIsOut } @@ -419,6 +416,17 @@ func (scr *smartContractResults) CreateAndProcessMiniBlock(sndShardId, dstShardI return nil, nil } +// CreateAndProcessMiniBlocks creates miniblocks from storage and processes the reward transactions added into the miniblocks +// as long as it has time +func (scr *smartContractResults) CreateAndProcessMiniBlocks( + maxTxSpaceRemained uint32, + maxMbSpaceRemained uint32, + round uint64, + _ func() bool, +) (block.MiniBlockSlice, error) { + return nil, nil +} + // ProcessMiniBlock processes all the smartContractResults from a and saves the processed smartContractResults in local cache complete miniblock func (scr *smartContractResults) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error { if miniBlock.Type != block.SmartContractResultBlock { diff --git a/process/block/preprocess/smartContractResults_test.go b/process/block/preprocess/smartContractResults_test.go index b6686f174b8..22851d4d989 100644 --- a/process/block/preprocess/smartContractResults_test.go +++ b/process/block/preprocess/smartContractResults_test.go @@ -300,7 +300,7 @@ func TestScrsPreprocessor_RequestBlockTransactionFromMiniBlockFromNetwork(t *tes func TestScrsPreprocessor_ReceivedTransactionShouldEraseRequested(t *testing.T) { t.Parallel() - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() shardedDataStub := &mock.ShardedDataStub{ ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { @@ -352,7 +352,7 @@ func TestScrsPreprocessor_GetAllTxsFromMiniBlockShouldWork(t *testing.T) { hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() senderShardId := uint32(0) destinationShardId := uint32(1) @@ -619,7 +619,7 @@ func TestScrsPreprocessor_ProcessBlockTransactions(t *testing.T) { miniblock := block.MiniBlock{ ReceiverShardID: 0, - SenderShardID: 0, + SenderShardID: 1, TxHashes: txHashes, Type: block.SmartContractResultBlock, } @@ -635,7 +635,7 @@ func TestScrsPreprocessor_ProcessBlockTransactions(t *testing.T) { scr.scrForBlock.txHashAndInfo["txHash"] = &txInfo{&smartcr, &txshardInfo} - err := scr.ProcessBlockTransactions(body, 1, haveTime) + err := scr.ProcessBlockTransactions(body, 1, haveTimeTrue) assert.Nil(t, err) } @@ -756,7 +756,7 @@ func TestScrsPreprocessor_RestoreTxBlockIntoPools(t *testing.T) { } } - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() shardedDataStub := &mock.ShardedDataStub{ AddDataCalled: func(key []byte, data interface{}, cacheId string) { @@ -794,9 +794,8 @@ func TestScrsPreprocessor_RestoreTxBlockIntoPools(t *testing.T) { body = append(body, &miniblock) miniblockPool := mock.NewCacherMock() - scrRestored, miniBlockHashes, err := scr.RestoreTxBlockIntoPools(body, miniblockPool) + scrRestored, err := scr.RestoreTxBlockIntoPools(body, miniblockPool) - assert.Equal(t, miniBlockHashes[0], []uint8([]byte(nil))) assert.Equal(t, scrRestored, 1) assert.Nil(t, err) } @@ -826,7 +825,7 @@ func TestScrsPreprocessor__RestoreTxBlockIntoPoolsNilMiniblockPoolShouldErr(t *t miniblockPool := storage.Cacher(nil) - _, _, err := scr.RestoreTxBlockIntoPools(body, miniblockPool) + _, err := scr.RestoreTxBlockIntoPools(body, miniblockPool) assert.NotNil(t, err) assert.Equal(t, err, process.ErrNilMiniBlockPool) diff --git 
a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 8eee907dcb5..7d2324687ce 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "sort" + "sync" "time" "github.com/ElrondNetwork/elrond-go/core" @@ -34,6 +35,10 @@ type transactions struct { storage dataRetriever.StorageService txProcessor process.TransactionProcessor accounts state.AccountsAdapter + orderedTxs map[string][]*transaction.Transaction + orderedTxHashes map[string][][]byte + mutOrderedTxs sync.RWMutex + economicsFee process.FeeHandler } // NewTransactionPreprocessor creates a new transaction preprocessor object @@ -46,6 +51,7 @@ func NewTransactionPreprocessor( shardCoordinator sharding.Coordinator, accounts state.AccountsAdapter, onRequestTransaction func(shardID uint32, txHashes [][]byte), + economicsFee process.FeeHandler, ) (*transactions, error) { if hasher == nil || hasher.IsInterfaceNil() { @@ -86,12 +92,15 @@ func NewTransactionPreprocessor( onRequestTransaction: onRequestTransaction, txProcessor: txProcessor, accounts: accounts, + economicsFee: economicsFee, } txs.chRcvAllTxs = make(chan bool) txs.txPool.RegisterHandler(txs.receivedTransaction) txs.txsForCurrBlock.txHashAndInfo = make(map[string]*txInfo) + txs.orderedTxs = make(map[string][]*transaction.Transaction) + txs.orderedTxHashes = make(map[string][][]byte) return &txs, nil } @@ -142,8 +151,7 @@ func (txs *transactions) RemoveTxBlockFromPools(body block.Body, miniBlockPool s func (txs *transactions) RestoreTxBlockIntoPools( body block.Body, miniBlockPool storage.Cacher, -) (int, map[int][]byte, error) { - miniBlockHashes := make(map[int][]byte) +) (int, error) { txsRestored := 0 for i := 0; i < len(body); i++ { @@ -151,45 +159,44 @@ func (txs *transactions) RestoreTxBlockIntoPools( strCache := process.ShardCacherIdentifier(miniBlock.SenderShardID, miniBlock.ReceiverShardID) txsBuff, err := txs.storage.GetAll(dataRetriever.TransactionUnit, miniBlock.TxHashes) if err != nil { - return txsRestored, miniBlockHashes, err + return txsRestored, err } for txHash, txBuff := range txsBuff { tx := transaction.Transaction{} err = txs.marshalizer.Unmarshal(&tx, txBuff) if err != nil { - return txsRestored, miniBlockHashes, err + return txsRestored, err } txs.txPool.AddData([]byte(txHash), &tx, strCache) err = txs.storage.GetStorer(dataRetriever.TransactionUnit).Remove([]byte(txHash)) if err != nil { - return txsRestored, miniBlockHashes, err + return txsRestored, err } } miniBlockHash, err := core.CalculateHash(txs.marshalizer, txs.hasher, miniBlock) if err != nil { - return txsRestored, miniBlockHashes, err + return txsRestored, err } - restoredHash := txs.restoreMiniBlock(miniBlock, miniBlockHash, miniBlockPool) + miniBlockPool.Put(miniBlockHash, miniBlock) err = txs.storage.GetStorer(dataRetriever.MiniBlockUnit).Remove(miniBlockHash) if err != nil { - return txsRestored, miniBlockHashes, err + return txsRestored, err } - miniBlockHashes[i] = restoredHash txsRestored += len(miniBlock.TxHashes) } - return txsRestored, miniBlockHashes, nil + return txsRestored, nil } // ProcessBlockTransactions processes all the transaction from the block.Body, updates the state -func (txs *transactions) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error { +func (txs *transactions) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() bool) error { // basic validation already done in interceptors for i := 
0; i < len(body); i++ { miniBlock := body[i] @@ -198,7 +205,7 @@ func (txs *transactions) ProcessBlockTransactions(body block.Body, round uint64, } for j := 0; j < len(miniBlock.TxHashes); j++ { - if haveTime() < 0 { + if !haveTime() { return process.ErrTimeIsOut } @@ -267,6 +274,11 @@ func (txs *transactions) CreateBlockStarted() { txs.txsForCurrBlock.missingTxs = 0 txs.txsForCurrBlock.txHashAndInfo = make(map[string]*txInfo) txs.txsForCurrBlock.mutTxsForBlock.Unlock() + + txs.mutOrderedTxs.Lock() + txs.orderedTxs = make(map[string][]*transaction.Transaction) + txs.orderedTxHashes = make(map[string][][]byte) + txs.mutOrderedTxs.Unlock() } // RequestBlockTransactions request for transactions if missing from a block.Body @@ -400,9 +412,6 @@ func (txs *transactions) getAllTxsFromMiniBlock( return transactions, txHashes, nil } -//TODO move this constant to txFeeHandler -const minGasLimitForTx = uint64(5) - //TODO move this to smart contract address calculation component func isSmartContractAddress(rcvAddress []byte) bool { isEmptyAddress := bytes.Equal(rcvAddress, make([]byte, len(rcvAddress))) @@ -419,37 +428,89 @@ func isSmartContractAddress(rcvAddress []byte) bool { return false } +// CreateAndProcessMiniBlocks creates miniblocks from storage and processes the transactions added into the miniblocks +// as long as it has time +func (txs *transactions) CreateAndProcessMiniBlocks( + maxTxSpaceRemained uint32, + maxMbSpaceRemained uint32, + round uint64, + haveTime func() bool, +) (block.MiniBlockSlice, error) { + + miniBlocks := make(block.MiniBlockSlice, 0) + newMBAdded := true + txSpaceRemained := int(maxTxSpaceRemained) + + for newMBAdded { + newMBAdded = false + for shardId := uint32(0); shardId < txs.shardCoordinator.NumberOfShards(); shardId++ { + if maxTxSpaceRemained <= 0 { + break + } + + mbSpaceRemained := int(maxMbSpaceRemained) - len(miniBlocks) + if mbSpaceRemained <= 0 { + break + } + + miniBlock, err := txs.CreateAndProcessMiniBlock( + txs.shardCoordinator.SelfId(), + shardId, + txSpaceRemained, + haveTime, + round) + if err != nil { + continue + } + + if len(miniBlock.TxHashes) > 0 { + txSpaceRemained -= len(miniBlock.TxHashes) + miniBlocks = append(miniBlocks, miniBlock) + newMBAdded = true + } + } + } + + return miniBlocks, nil +} + // CreateAndProcessMiniBlock creates the miniblock from storage and processes the transactions added into the miniblock -func (txs *transactions) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) { - strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) - txStore := txs.txPool.ShardDataStore(strCache) +func (txs *transactions) CreateAndProcessMiniBlock( + sndShardId uint32, + dstShardId uint32, + spaceRemained int, + haveTime func() bool, + round uint64, +) (*block.MiniBlock, error) { + + var orderedTxs []*transaction.Transaction + var orderedTxHashes [][]byte timeBefore := time.Now() - orderedTxes, orderedTxHashes, err := SortTxByNonce(txStore) + orderedTxs, orderedTxHashes, err := txs.computeOrderedTxs(sndShardId, dstShardId) timeAfter := time.Now() if err != nil { - log.Info(err.Error()) + log.Debug(err.Error()) return nil, err } if !haveTime() { - log.Info(fmt.Sprintf("time is up after ordered %d txs in %v sec\n", len(orderedTxes), timeAfter.Sub(timeBefore).Seconds())) + log.Info(fmt.Sprintf("time is up after ordered %d txs in %v sec\n", len(orderedTxs), timeAfter.Sub(timeBefore).Seconds())) return nil, process.ErrTimeIsOut } - 
log.Debug(fmt.Sprintf("time elapsed to ordered %d txs: %v sec\n", len(orderedTxes), timeAfter.Sub(timeBefore).Seconds())) + log.Debug(fmt.Sprintf("time elapsed to ordered %d txs: %v sec\n", len(orderedTxs), timeAfter.Sub(timeBefore).Seconds())) miniBlock := &block.MiniBlock{} miniBlock.SenderShardID = sndShardId miniBlock.ReceiverShardID = dstShardId miniBlock.TxHashes = make([][]byte, 0) miniBlock.Type = block.TxBlock - log.Info(fmt.Sprintf("creating mini blocks has been started: have %d txs in pool for shard id %d\n", len(orderedTxes), miniBlock.ReceiverShardID)) addedTxs := 0 addedGasLimitPerCrossShardMiniblock := uint64(0) - for index := range orderedTxes { + for index := range orderedTxs { if !haveTime() { break } @@ -458,12 +519,16 @@ func (txs *transactions) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32 continue } - currTxGasLimit := minGasLimitForTx - if isSmartContractAddress(orderedTxes[index].RcvAddr) { - currTxGasLimit = orderedTxes[index].GasLimit + currTxGasLimit := txs.economicsFee.MinGasLimit() + if isSmartContractAddress(orderedTxs[index].RcvAddr) { + currTxGasLimit = orderedTxs[index].GasLimit } - if addedGasLimitPerCrossShardMiniblock+currTxGasLimit > process.MaxGasLimitPerMiniBlock { + isGasLimitReached := addedGasLimitPerCrossShardMiniblock+currTxGasLimit > process.MaxGasLimitPerMiniBlock + if isGasLimitReached { + log.Info(fmt.Sprintf("max gas limit per mini block is reached: added %d txs from %d txs\n", + len(miniBlock.TxHashes), + len(orderedTxs))) continue } @@ -472,7 +537,7 @@ func (txs *transactions) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32 // execute transaction to change the trie root hash err := txs.processAndRemoveBadTransaction( orderedTxHashes[index], - orderedTxes[index], + orderedTxs[index], round, miniBlock.SenderShardID, miniBlock.ReceiverShardID, @@ -492,7 +557,9 @@ func (txs *transactions) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32 addedGasLimitPerCrossShardMiniblock += currTxGasLimit if addedTxs >= spaceRemained { // max transactions count in one block was reached - log.Info(fmt.Sprintf("max txs accepted in one block is reached: added %d txs from %d txs\n", len(miniBlock.TxHashes), len(orderedTxes))) + log.Info(fmt.Sprintf("max txs accepted in one block is reached: added %d txs from %d txs\n", + len(miniBlock.TxHashes), + len(orderedTxs))) return miniBlock, nil } } @@ -500,6 +567,49 @@ func (txs *transactions) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32 return miniBlock, nil } +func (txs *transactions) computeOrderedTxs( + sndShardId uint32, + dstShardId uint32, +) ([]*transaction.Transaction, [][]byte, error) { + + var err error + + strCache := process.ShardCacherIdentifier(sndShardId, dstShardId) + txShardPool := txs.txPool.ShardDataStore(strCache) + + if txShardPool == nil { + return nil, nil, process.ErrNilTxDataPool + } + if txShardPool.Len() == 0 { + return nil, nil, process.ErrEmptyTxDataPool + } + + txs.mutOrderedTxs.RLock() + orderedTxs := txs.orderedTxs[strCache] + orderedTxHashes := txs.orderedTxHashes[strCache] + txs.mutOrderedTxs.RUnlock() + + alreadyOrdered := len(orderedTxs) > 0 + if !alreadyOrdered { + orderedTxs, orderedTxHashes, err = SortTxByNonce(txShardPool) + if err != nil { + return nil, nil, err + } + + log.Info(fmt.Sprintf("creating mini blocks has been started: have %d txs in pool for shard %d from shard %d\n", + len(orderedTxs), + dstShardId, + sndShardId)) + + txs.mutOrderedTxs.Lock() + txs.orderedTxs[strCache] = orderedTxs + txs.orderedTxHashes[strCache] = orderedTxHashes 
+ txs.mutOrderedTxs.Unlock() + } + + return orderedTxs, orderedTxHashes, nil +} + // ProcessMiniBlock processes all the transactions from a and saves the processed transactions in local cache complete miniblock func (txs *transactions) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error { if miniBlock.Type != block.TxBlock { @@ -535,9 +645,9 @@ func (txs *transactions) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime f } // SortTxByNonce sort transactions according to nonces -func SortTxByNonce(txShardStore storage.Cacher) ([]*transaction.Transaction, [][]byte, error) { - if txShardStore == nil { - return nil, nil, process.ErrNilCacher +func SortTxByNonce(txShardPool storage.Cacher) ([]*transaction.Transaction, [][]byte, error) { + if txShardPool == nil { + return nil, nil, process.ErrNilTxDataPool } transactions := make([]*transaction.Transaction, 0) @@ -548,8 +658,8 @@ func SortTxByNonce(txShardStore storage.Cacher) ([]*transaction.Transaction, [][ nonces := make([]uint64, 0) - for _, key := range txShardStore.Keys() { - val, _ := txShardStore.Peek(key) + for _, key := range txShardPool.Keys() { + val, _ := txShardPool.Peek(key) if val == nil { continue } diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index 8c00e50f546..2aa67e92738 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/hex" "fmt" + "math/big" "math/rand" "reflect" "sync" @@ -12,6 +13,7 @@ import ( "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -25,6 +27,20 @@ import ( "github.com/stretchr/testify/assert" ) +func FeeHandlerMock() *mock.FeeHandlerStub { + return &mock.FeeHandlerStub{ + MinGasPriceCalled: func() uint64 { + return 0 + }, + MinGasLimitCalled: func() uint64 { + return 5 + }, + MinTxFeeCalled: func() uint64 { + return 0 + }, + } +} + func initDataPool() *mock.PoolsHolderStub { sdp := &mock.PoolsHolderStub{ TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { @@ -46,6 +62,7 @@ func initDataPool() *mock.PoolsHolderStub { }, } }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { if reflect.DeepEqual(key, []byte("tx1_hash")) { @@ -74,6 +91,7 @@ func initDataPool() *mock.PoolsHolderStub { }, } }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { if reflect.DeepEqual(key, []byte("tx1_hash")) { @@ -83,6 +101,35 @@ func initDataPool() *mock.PoolsHolderStub { }, } }, + RewardTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &rewardTx.RewardTx{Value: big.NewInt(100)}, true + } + return nil, false + }, + KeysCalled: func() 
[][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) {}, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return &rewardTx.RewardTx{Value: big.NewInt(100)}, true + } + return nil, false + }, + } + }, HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { return &mock.Uint64SyncMapCacherStub{} }, @@ -154,6 +201,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilPool(t *testing.T) { mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) assert.Nil(t, txs) @@ -174,6 +222,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilStore(t *testing.T) { mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) assert.Nil(t, txs) @@ -194,6 +243,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilHasher(t *testing.T) { mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) assert.Nil(t, txs) @@ -214,6 +264,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilMarsalizer(t *testing.T) { mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) assert.Nil(t, txs) @@ -234,6 +285,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilTxProce(t *testing.T) { mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) assert.Nil(t, txs) @@ -254,6 +306,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilShardCoord(t *testing.T) { nil, &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) assert.Nil(t, txs) @@ -274,6 +327,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilAccounts(t *testing.T) { mock.NewMultiShardsCoordinatorMock(3), nil, requestTransaction, + FeeHandlerMock(), ) assert.Nil(t, txs) @@ -293,6 +347,7 @@ func TestTxsPreprocessor_NewTransactionPreprocessorNilRequestFunc(t *testing.T) mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, nil, + FeeHandlerMock(), ) assert.Nil(t, txs) @@ -312,6 +367,7 @@ func TestTxsPreProcessor_GetTransactionFromPool(t *testing.T) { mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) txHash := []byte("tx1_hash") tx, _ := process.GetTransactionHandlerFromPool(1, 1, txHash, tdp.Transactions()) @@ -333,6 +389,7 @@ func TestTransactionPreprocessor_RequestTransactionFromNetwork(t *testing.T) { mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) shardId := uint32(1) txHash1 := []byte("tx_hash1") @@ -360,6 +417,7 @@ func TestTransactionPreprocessor_RequestBlockTransactionFromMiniBlockFromNetwork mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) shardId := uint32(1) @@ -376,7 +434,7 @@ func TestTransactionPreprocessor_RequestBlockTransactionFromMiniBlockFromNetwork func TestTransactionPreprocessor_ReceivedTransactionShouldEraseRequested(t *testing.T) { t.Parallel() - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() shardedDataStub := &mock.ShardedDataStub{ ShardDataStoreCalled: func(cacheId string) (c storage.Cacher) { @@ -402,6 +460,7 @@ func TestTransactionPreprocessor_ReceivedTransactionShouldEraseRequested(t *test 
mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) //add 3 tx hashes on requested list @@ -435,7 +494,7 @@ func TestTransactionPreprocessor_GetAllTxsFromMiniBlockShouldWork(t *testing.T) hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() senderShardId := uint32(0) destinationShardId := uint32(1) @@ -475,6 +534,7 @@ func TestTransactionPreprocessor_GetAllTxsFromMiniBlockShouldWork(t *testing.T) mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) mb := &block.MiniBlock{ @@ -509,6 +569,7 @@ func TestTransactionPreprocessor_RemoveBlockTxsFromPoolNilBlockShouldErr(t *test mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) err := txs.RemoveTxBlockFromPools(nil, tdp.MiniBlocks()) assert.NotNil(t, err) @@ -528,6 +589,7 @@ func TestTransactionPreprocessor_RemoveBlockTxsFromPoolOK(t *testing.T) { mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) body := make(block.Body, 0) txHash := []byte("txHash") @@ -562,6 +624,7 @@ func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddAll(t *testi mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) assert.NotNil(t, txs) @@ -604,6 +667,7 @@ func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddAllAsNoSCCal mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) assert.NotNil(t, txs) @@ -648,6 +712,7 @@ func TestTransactions_CreateAndProcessMiniBlockCrossShardGasLimitAddOnly5asSCCal mock.NewMultiShardsCoordinatorMock(3), &mock.AccountsStub{}, requestTransaction, + FeeHandlerMock(), ) assert.NotNil(t, txs) @@ -697,12 +762,12 @@ func init() { r = rand.New(rand.NewSource(time.Now().UnixNano())) } -func TestSortTxByNonce_NilCacherShouldErr(t *testing.T) { +func TestSortTxByNonce_NilTxDataPoolShouldErr(t *testing.T) { t.Parallel() transactions, txHashes, err := SortTxByNonce(nil) assert.Nil(t, transactions) assert.Nil(t, txHashes) - assert.Equal(t, process.ErrNilCacher, err) + assert.Equal(t, process.ErrNilTxDataPool, err) } func TestSortTxByNonce_EmptyCacherShouldReturnEmpty(t *testing.T) { diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 248bcfb7496..5a5d62d670b 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -18,38 +18,25 @@ import ( "github.com/ElrondNetwork/elrond-go/statusHandler" ) +const maxCleanTime = time.Second + // shardProcessor implements shardProcessor interface and actually it tries to execute block type shardProcessor struct { *baseProcessor - dataPool dataRetriever.PoolsHolder - blocksTracker process.BlocksTracker - metaBlockFinality int - - chRcvAllMetaHdrs chan bool - mutUsedMetaHdrsHashes sync.Mutex - usedMetaHdrsHashes map[uint64][][]byte - - mutRequestedMetaHdrsHashes sync.RWMutex - requestedMetaHdrsHashes map[string]bool - currHighestMetaHdrNonce uint64 - allNeededMetaHdrsFound bool - - core serviceContainer.Core - txCoordinator process.TransactionCoordinator - txCounter *transactionCounter + dataPool dataRetriever.PoolsHolder + metaBlockFinality uint32 + chRcvAllMetaHdrs chan bool + processedMiniBlocks map[string]map[string]struct{} + mutProcessedMiniBlocks sync.RWMutex + core serviceContainer.Core + txCoordinator process.TransactionCoordinator + txCounter 
*transactionCounter + txsPoolsCleaner process.PoolsCleaner } // NewShardProcessor creates a new shardProcessor object func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { - - err := checkProcessorNilParameters( - arguments.Accounts, - arguments.ForkDetector, - arguments.Hasher, - arguments.Marshalizer, - arguments.Store, - arguments.ShardCoordinator, - arguments.Uint64Converter) + err := checkProcessorNilParameters(arguments.ArgBaseProcessor) if err != nil { return nil, err } @@ -57,12 +44,6 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { if arguments.DataPool == nil || arguments.DataPool.IsInterfaceNil() { return nil, process.ErrNilDataPoolHolder } - if arguments.BlocksTracker == nil || arguments.BlocksTracker.IsInterfaceNil() { - return nil, process.ErrNilBlocksTracker - } - if arguments.RequestHandler == nil || arguments.RequestHandler.IsInterfaceNil() { - return nil, process.ErrNilRequestHandler - } if arguments.TxCoordinator == nil || arguments.TxCoordinator.IsInterfaceNil() { return nil, process.ErrNilTransactionCoordinator } @@ -80,6 +61,8 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { marshalizer: arguments.Marshalizer, store: arguments.Store, shardCoordinator: arguments.ShardCoordinator, + nodesCoordinator: arguments.NodesCoordinator, + specialAddressHandler: arguments.SpecialAddressHandler, uint64Converter: arguments.Uint64Converter, onRequestHeaderHandlerByNonce: arguments.RequestHandler.RequestHeaderByNonce, appStatusHandler: statusHandler.NewNilStatusHandler(), @@ -89,15 +72,18 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { return nil, err } - sp := shardProcessor{ - core: arguments.Core, - baseProcessor: base, - dataPool: arguments.DataPool, - blocksTracker: arguments.BlocksTracker, - txCoordinator: arguments.TxCoordinator, - txCounter: NewTransactionCounter(), + if arguments.TxsPoolsCleaner == nil || arguments.TxsPoolsCleaner.IsInterfaceNil() { + return nil, process.ErrNilTxsPoolsCleaner } + sp := shardProcessor{ + core: arguments.Core, + baseProcessor: base, + dataPool: arguments.DataPool, + txCoordinator: arguments.TxCoordinator, + txCounter: NewTransactionCounter(), + txsPoolsCleaner: arguments.TxsPoolsCleaner, + } sp.chRcvAllMetaHdrs = make(chan bool) transactionPool := sp.dataPool.Transactions() @@ -105,8 +91,9 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { return nil, process.ErrNilTransactionPool } - sp.requestedMetaHdrsHashes = make(map[string]bool) - sp.usedMetaHdrsHashes = make(map[uint64][][]byte) + sp.hdrsForCurrBlock.hdrHashAndInfo = make(map[string]*hdrInfo) + sp.hdrsForCurrBlock.highestHdrNonce = make(map[uint32]uint64) + sp.processedMiniBlocks = make(map[string]map[string]struct{}) metaBlockPool := sp.dataPool.MetaBlocks() if metaBlockPool == nil { @@ -116,7 +103,6 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { sp.onRequestHeaderHandler = arguments.RequestHandler.RequestHeader sp.metaBlockFinality = process.MetaBlockFinality - sp.allNeededMetaHdrsFound = true return &sp, nil } @@ -135,6 +121,14 @@ func (sp *shardProcessor) ProcessBlock( err := sp.checkBlockValidity(chainHandler, headerHandler, bodyHandler) if err != nil { + if err == process.ErrBlockHashDoesNotMatch { + log.Info(fmt.Sprintf("requested missing shard header with hash %s for shard %d\n", + core.ToB64(headerHandler.GetPrevHash()), + headerHandler.GetShardID())) + + go 
sp.onRequestHeaderHandler(headerHandler.GetShardID(), headerHandler.GetPrevHash()) + } + return err } @@ -165,9 +159,20 @@ func (sp *shardProcessor) ProcessBlock( log.Info(fmt.Sprintf("Total txs in pool: %d\n", numTxWithDst)) + err = sp.specialAddressHandler.SetShardConsensusData( + headerHandler.GetPrevRandSeed(), + headerHandler.GetRound(), + headerHandler.GetEpoch(), + headerHandler.GetShardID(), + ) + if err != nil { + return err + } + sp.txCoordinator.CreateBlockStarted() + sp.createBlockStarted() sp.txCoordinator.RequestBlockTransactions(body) - requestedMetaHdrs, requestedFinalMetaHdrs := sp.requestMetaHeaders(header) + requestedMetaHdrs, requestedFinalityAttestingMetaHdrs := sp.requestMetaHeaders(header) if haveTime() < 0 { return process.ErrTimeIsOut @@ -178,16 +183,24 @@ func (sp *shardProcessor) ProcessBlock( return err } - if requestedMetaHdrs > 0 || requestedFinalMetaHdrs > 0 { - log.Info(fmt.Sprintf("requested %d missing meta headers and %d final meta headers\n", requestedMetaHdrs, requestedFinalMetaHdrs)) + haveMissingMetaHeaders := requestedMetaHdrs > 0 || requestedFinalityAttestingMetaHdrs > 0 + if haveMissingMetaHeaders { + log.Info(fmt.Sprintf("requested %d missing meta headers and %d finality attesting meta headers\n", + requestedMetaHdrs, + requestedFinalityAttestingMetaHdrs)) + err = sp.waitForMetaHdrHashes(haveTime()) - sp.mutRequestedMetaHdrsHashes.Lock() - sp.allNeededMetaHdrsFound = true - unreceivedMetaHdrs := len(sp.requestedMetaHdrsHashes) - sp.mutRequestedMetaHdrsHashes.Unlock() + + sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + missingMetaHdrs := sp.hdrsForCurrBlock.missingHdrs + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + + sp.resetMissingHdrs() + if requestedMetaHdrs > 0 { - log.Info(fmt.Sprintf("received %d missing meta headers\n", int(requestedMetaHdrs)-unreceivedMetaHdrs)) + log.Info(fmt.Sprintf("received %d missing meta headers\n", requestedMetaHdrs-missingMetaHdrs)) } + if err != nil { return err } @@ -201,7 +214,7 @@ func (sp *shardProcessor) ProcessBlock( go sp.checkAndRequestIfMetaHeadersMissing(header.Round) }() - err = sp.checkMetaHeadersValidityAndFinality(header) + err = sp.checkMetaHeadersValidityAndFinality() if err != nil { return err } @@ -217,13 +230,23 @@ func (sp *shardProcessor) ProcessBlock( } }() + processedMetaHdrs, err := sp.getOrderedProcessedMetaBlocksFromMiniBlocks(body) + if err != nil { + return err + } + + err = sp.setMetaConsensusData(processedMetaHdrs) + if err != nil { + return err + } + err = sp.txCoordinator.ProcessBlockTransaction(body, header.Round, haveTime) if err != nil { return err } if !sp.verifyStateRoot(header.GetRootHash()) { - err = process.ErrRootStateMissmatch + err = process.ErrRootStateDoesNotMatch return err } @@ -235,50 +258,52 @@ func (sp *shardProcessor) ProcessBlock( return nil } -// checkMetaHeadersValidity - checks if listed metaheaders are valid as construction -func (sp *shardProcessor) checkMetaHeadersValidityAndFinality(header *block.Header) error { - metablockCache := sp.dataPool.MetaBlocks() - if metablockCache == nil { - return process.ErrNilMetaBlockPool - } +func (sp *shardProcessor) setMetaConsensusData(finalizedMetaBlocks []data.HeaderHandler) error { + sp.specialAddressHandler.ClearMetaConsensusData() - tmpNotedHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) - if err != nil { - return err + // for every finalized metablock header, reward the metachain consensus group members with accounts in shard + for _, metaBlock := range finalizedMetaBlocks { + round := 
metaBlock.GetRound() + epoch := metaBlock.GetEpoch() + err := sp.specialAddressHandler.SetMetaConsensusData(metaBlock.GetPrevRandSeed(), round, epoch) + if err != nil { + return err + } } - currAddedMetaHdrs := make([]*block.MetaBlock, 0) - for _, metaHash := range header.MetaBlockHashes { - value, ok := metablockCache.Peek(metaHash) - if !ok { - return process.ErrNilMetaBlockHeader - } + return nil +} - metaHdr, ok := value.(*block.MetaBlock) - if !ok { - return process.ErrWrongTypeAssertion - } +// SetConsensusData - sets the reward data for the current consensus group +func (sp *shardProcessor) SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) { + err := sp.specialAddressHandler.SetShardConsensusData(randomness, round, epoch, shardId) + if err != nil { + log.Error(err.Error()) + } +} - currAddedMetaHdrs = append(currAddedMetaHdrs, metaHdr) +// checkMetaHeadersValidity - checks if listed metaheaders are valid as construction +func (sp *shardProcessor) checkMetaHeadersValidityAndFinality() error { + tmpNotedHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) + if err != nil { + return err } - if len(currAddedMetaHdrs) == 0 { + usedMetaHdrs := sp.sortHeadersForCurrentBlockByNonce(true) + if len(usedMetaHdrs[sharding.MetachainShardId]) == 0 { return nil } - sort.Slice(currAddedMetaHdrs, func(i, j int) bool { - return currAddedMetaHdrs[i].Nonce < currAddedMetaHdrs[j].Nonce - }) - - for _, metaHdr := range currAddedMetaHdrs { - err := sp.isHdrConstructionValid(metaHdr, tmpNotedHdr) + for _, metaHdr := range usedMetaHdrs[sharding.MetachainShardId] { + err = sp.isHdrConstructionValid(metaHdr, tmpNotedHdr) if err != nil { return err } + tmpNotedHdr = metaHdr } - err = sp.checkMetaHdrFinality(tmpNotedHdr, header.Round) + err = sp.checkMetaHdrFinality(tmpNotedHdr) if err != nil { return err } @@ -287,33 +312,30 @@ func (sp *shardProcessor) checkMetaHeadersValidityAndFinality(header *block.Head } // check if shard headers are final by checking if newer headers were constructed upon them -func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler, round uint64) error { +func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler) error { if header == nil || header.IsInterfaceNil() { return process.ErrNilBlockHeader } - sortedMetaHdrs, err := sp.getOrderedMetaBlocks(round) - if err != nil { - return err - } + finalityAttestingMetaHdrs := sp.sortHeadersForCurrentBlockByNonce(false) lastVerifiedHdr := header // verify if there are "K" block after current to make this one final - nextBlocksVerified := 0 - for _, tmpHdr := range sortedMetaHdrs { + nextBlocksVerified := uint32(0) + for _, metaHdr := range finalityAttestingMetaHdrs[sharding.MetachainShardId] { if nextBlocksVerified >= sp.metaBlockFinality { break } // found a header with the next nonce - if tmpHdr.hdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { - err := sp.isHdrConstructionValid(tmpHdr.hdr, lastVerifiedHdr) + if metaHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { + err := sp.isHdrConstructionValid(metaHdr, lastVerifiedHdr) if err != nil { log.Debug(err.Error()) continue } - lastVerifiedHdr = tmpHdr.hdr + lastVerifiedHdr = metaHdr nextBlocksVerified += 1 } } @@ -328,7 +350,7 @@ func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler, round // check if header has the same miniblocks as presented in body func (sp *shardProcessor) checkHeaderBodyCorrelation(hdr *block.Header, body block.Body) error { - mbHashesFromHdr := 
make(map[string]*block.MiniBlockHeader) + mbHashesFromHdr := make(map[string]*block.MiniBlockHeader, len(hdr.MiniBlockHeaders)) for i := 0; i < len(hdr.MiniBlockHeaders); i++ { mbHashesFromHdr[string(hdr.MiniBlockHeaders[i].Hash)] = &hdr.MiniBlockHeaders[i] } @@ -393,19 +415,34 @@ func (sp *shardProcessor) checkAndRequestIfMetaHeadersMissing(round uint64) { func (sp *shardProcessor) indexBlockIfNeeded( body data.BodyHandler, - header data.HeaderHandler) { + header data.HeaderHandler, + lastBlockHeader data.HeaderHandler, +) { if sp.core == nil || sp.core.Indexer() == nil { return } txPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.TxBlock) scPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.SmartContractResultBlock) + rewardPool := sp.txCoordinator.GetAllCurrentUsedTxs(block.RewardsBlock) for hash, tx := range scPool { txPool[hash] = tx } + for hash, tx := range rewardPool { + txPool[hash] = tx + } + + shardId := sp.shardCoordinator.SelfId() + pubKeys, err := sp.nodesCoordinator.GetValidatorsPublicKeys(header.GetPrevRandSeed(), header.GetRound(), shardId) + if err != nil { + return + } + + signersIndexes := sp.nodesCoordinator.GetValidatorsIndexes(pubKeys) + go sp.core.Indexer().SaveBlock(body, header, txPool, signersIndexes) - go sp.core.Indexer().SaveBlock(body, header, txPool) + saveRoundInfoInElastic(sp.core.Indexer(), sp.nodesCoordinator, shardId, header, lastBlockHeader, signersIndexes) } // RestoreBlockIntoPools restores the TxBlock and MetaBlock into associated pools @@ -429,7 +466,7 @@ func (sp *shardProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler return process.ErrWrongTypeAssertion } - restoredTxNr, _, err := sp.txCoordinator.RestoreBlockDataFromStorage(body) + restoredTxNr, err := sp.txCoordinator.RestoreBlockDataFromStorage(body) go sp.txCounter.subtractRestoredTxs(restoredTxNr) if err != nil { return err @@ -470,7 +507,7 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool(miniBlockHashes map[string]ui processedMiniBlocks := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) for mbHash := range processedMiniBlocks { - metaBlock.SetMiniBlockProcessed([]byte(mbHash), true) + sp.addProcessedMiniBlock(metaBlockHash, []byte(mbHash)) } metaBlockPool.Put(metaBlockHash, &metaBlock) @@ -490,32 +527,8 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool(miniBlockHashes map[string]ui } } - for _, metaBlockKey := range metaBlockPool.Keys() { - if len(miniBlockHashes) == 0 { - break - } - metaBlock, ok := metaBlockPool.Peek(metaBlockKey) - if !ok { - log.Error(process.ErrNilMetaBlockHeader.Error()) - continue - } - - hdr, ok := metaBlock.(data.HeaderHandler) - if !ok { - metaBlockPool.Remove(metaBlockKey) - log.Error(process.ErrWrongTypeAssertion.Error()) - continue - } - - crossMiniBlockHashes := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for key := range miniBlockHashes { - _, ok := crossMiniBlockHashes[key] - if !ok { - continue - } - - hdr.SetMiniBlockProcessed([]byte(key), false) - } + for miniBlockHash := range miniBlockHashes { + sp.removeProcessedMiniBlock([]byte(miniBlockHash)) } return nil @@ -526,9 +539,10 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool(miniBlockHashes map[string]ui func (sp *shardProcessor) CreateBlockBody(round uint64, haveTime func() bool) (data.BodyHandler, error) { log.Debug(fmt.Sprintf("started creating block body in round %d\n", round)) sp.txCoordinator.CreateBlockStarted() + sp.createBlockStarted() sp.blockSizeThrottler.ComputeMaxItems() - miniBlocks, err := 
sp.createMiniBlocks(sp.shardCoordinator.NumberOfShards(), sp.blockSizeThrottler.MaxItemsToAdd(), round, haveTime) + miniBlocks, err := sp.createMiniBlocks(sp.blockSizeThrottler.MaxItemsToAdd(), round, haveTime) if err != nil { return nil, err } @@ -618,7 +632,12 @@ func (sp *shardProcessor) CommitBlock( log.LogIfError(errNotCritical) } - processedMetaHdrs, err := sp.getProcessedMetaBlocksFromHeader(header) + processedMetaHdrs, err := sp.getOrderedProcessedMetaBlocksFromHeader(header) + if err != nil { + return err + } + + err = sp.addProcessedCrossMiniBlocksFromHeader(header) if err != nil { return err } @@ -645,18 +664,16 @@ func (sp *shardProcessor) CommitBlock( return err } - log.Info(fmt.Sprintf("shardBlock with nonce %d and hash %s has been committed successfully\n", + log.Info(fmt.Sprintf("shard block with nonce %d and hash %s has been committed successfully\n", header.Nonce, core.ToB64(headerHash))) - sp.blocksTracker.AddBlock(header) - errNotCritical = sp.txCoordinator.RemoveBlockDataFromPool(body) if errNotCritical != nil { log.Debug(errNotCritical.Error()) } - errNotCritical = sp.removeProcessedMetablocksFromPool(processedMetaHdrs) + errNotCritical = sp.removeProcessedMetaBlocksFromPool(processedMetaHdrs) if errNotCritical != nil { log.Debug(errNotCritical.Error()) } @@ -666,15 +683,19 @@ func (sp *shardProcessor) CommitBlock( log.Debug(errNotCritical.Error()) } - log.Info(fmt.Sprintf("shardBlock with nonce %d is the highest block notarized by metachain for shard %d\n", - sp.forkDetector.GetHighestFinalBlockNonce(), + highestFinalBlockNonce := sp.forkDetector.GetHighestFinalBlockNonce() + log.Info(fmt.Sprintf("shard block with nonce %d is the highest final block in shard %d\n", + highestFinalBlockNonce, sp.shardCoordinator.SelfId())) sp.appStatusHandler.SetStringValue(core.MetricCurrentBlockHash, core.ToB64(headerHash)) + sp.appStatusHandler.SetUInt64Value(core.MetricHighestFinalBlockInShard, highestFinalBlockNonce) - hdrsToAttestPreviousFinal := uint32(header.Nonce-sp.forkDetector.GetHighestFinalBlockNonce()) + 1 + hdrsToAttestPreviousFinal := uint32(header.Nonce-highestFinalBlockNonce) + 1 sp.removeNotarizedHdrsBehindPreviousFinal(hdrsToAttestPreviousFinal) + lastBlockHeader := chainHandler.GetCurrentBlockHeader() + err = chainHandler.SetCurrentBlockBody(body) if err != nil { return err @@ -686,8 +707,9 @@ func (sp *shardProcessor) CommitBlock( } chainHandler.SetCurrentBlockHeaderHash(headerHash) + sp.indexBlockIfNeeded(bodyHandler, headerHandler, lastBlockHeader) - sp.indexBlockIfNeeded(bodyHandler, headerHandler) + go sp.cleanTxsPools() // write data to log go sp.txCounter.displayLogInfo( @@ -704,6 +726,12 @@ func (sp *shardProcessor) CommitBlock( return nil } +func (sp *shardProcessor) cleanTxsPools() { + _, err := sp.txsPoolsCleaner.Clean(maxCleanTime) + log.LogIfError(err) + log.Info(fmt.Sprintf("%d txs have been removed from pools after cleaning\n", sp.txsPoolsCleaner.NumRemovedTxs())) +} + // getHighestHdrForOwnShardFromMetachain calculates the highest shard header notarized by metachain func (sp *shardProcessor) getHighestHdrForOwnShardFromMetachain( processedHdrs []data.HeaderHandler, @@ -711,9 +739,7 @@ func (sp *shardProcessor) getHighestHdrForOwnShardFromMetachain( ownShIdHdrs := make([]data.HeaderHandler, 0) - sort.Slice(processedHdrs, func(i, j int) bool { - return processedHdrs[i].GetNonce() < processedHdrs[j].GetNonce() - }) + process.SortHeadersByNonce(processedHdrs) for i := 0; i < len(processedHdrs); i++ { hdr, ok := processedHdrs[i].(*block.MetaBlock) @@ 
-733,14 +759,12 @@ func (sp *shardProcessor) getHighestHdrForOwnShardFromMetachain( ownShIdHdrs = append(ownShIdHdrs, &block.Header{}) } - sort.Slice(ownShIdHdrs, func(i, j int) bool { - return ownShIdHdrs[i].GetNonce() < ownShIdHdrs[j].GetNonce() - }) + process.SortHeadersByNonce(ownShIdHdrs) - ownShIdHdrsHashes := make([][]byte, 0) + ownShIdHdrsHashes := make([][]byte, len(ownShIdHdrs)) for i := 0; i < len(ownShIdHdrs); i++ { hash, _ := core.CalculateHash(sp.marshalizer, sp.hasher, ownShIdHdrs[i]) - ownShIdHdrsHashes = append(ownShIdHdrsHashes, hash) + ownShIdHdrsHashes[i] = hash } return ownShIdHdrs, ownShIdHdrsHashes, nil @@ -778,49 +802,136 @@ func (sp *shardProcessor) getHighestHdrForShardFromMetachain(shardId uint32, hdr return ownShIdHdr, nil } -// getProcessedMetaBlocksFromHeader returns all the meta blocks fully processed -func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) ([]data.HeaderHandler, error) { +// getOrderedProcessedMetaBlocksFromHeader returns all the meta blocks fully processed +func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromHeader(header *block.Header) ([]data.HeaderHandler, error) { if header == nil { return nil, process.ErrNilBlockHeader } - miniBlockHashes := make(map[int][]byte, 0) + miniBlockHashes := make(map[int][]byte, len(header.MiniBlockHeaders)) + for i := 0; i < len(header.MiniBlockHeaders); i++ { + miniBlockHashes[i] = header.MiniBlockHeaders[i].Hash + } + + log.Debug(fmt.Sprintf("cross mini blocks in body: %d\n", len(miniBlockHashes))) + + processedMetaBlocks, err := sp.getOrderedProcessedMetaBlocksFromMiniBlockHashes(miniBlockHashes) + if err != nil { + return nil, err + } + + return processedMetaBlocks, nil +} + +func (sp *shardProcessor) addProcessedCrossMiniBlocksFromHeader(header *block.Header) error { + if header == nil { + return process.ErrNilBlockHeader + } + + miniBlockHashes := make(map[int][]byte, len(header.MiniBlockHeaders)) for i := 0; i < len(header.MiniBlockHeaders); i++ { miniBlockHashes[i] = header.MiniBlockHeaders[i].Hash } + sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + for _, metaBlockHash := range header.MetaBlockHashes { + hdrInfo, ok := sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaBlockHash)] + if !ok { + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + return process.ErrMissingHeader + } + + metaBlock, ok := hdrInfo.hdr.(*block.MetaBlock) + if !ok { + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + return process.ErrWrongTypeAssertion + } + + crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) + for key, miniBlockHash := range miniBlockHashes { + _, ok = crossMiniBlockHashes[string(miniBlockHash)] + if !ok { + continue + } + + sp.addProcessedMiniBlock(metaBlockHash, miniBlockHash) + + delete(miniBlockHashes, key) + } + } + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + + return nil +} + +// getOrderedProcessedMetaBlocksFromMiniBlocks returns all the meta blocks fully processed ordered +func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromMiniBlocks( + usedMiniBlocks []*block.MiniBlock, +) ([]data.HeaderHandler, error) { + + miniBlockHashes := make(map[int][]byte) + for i := 0; i < len(usedMiniBlocks); i++ { + if usedMiniBlocks[i].SenderShardID == sp.shardCoordinator.SelfId() { + continue + } + + miniBlockHash, err := core.CalculateHash(sp.marshalizer, sp.hasher, usedMiniBlocks[i]) + if err != nil { + log.Debug(err.Error()) + continue + } + + miniBlockHashes[i] = miniBlockHash + } + log.Debug(fmt.Sprintf("cross mini blocks in body: %d\n", 
len(miniBlockHashes))) + processedMetaBlocks, err := sp.getOrderedProcessedMetaBlocksFromMiniBlockHashes(miniBlockHashes) + + return processedMetaBlocks, err +} + +func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromMiniBlockHashes( + miniBlockHashes map[int][]byte, +) ([]data.HeaderHandler, error) { processedMetaHdrs := make([]data.HeaderHandler, 0) - for _, metaBlockKey := range header.MetaBlockHashes { - obj, _ := sp.dataPool.MetaBlocks().Peek(metaBlockKey) - if obj == nil { - return nil, process.ErrNilMetaBlockHeader + processedCrossMiniBlocksHashes := make(map[string]bool) + + sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + for metaBlockHash, hdrInfo := range sp.hdrsForCurrBlock.hdrHashAndInfo { + if !hdrInfo.usedInBlock { + continue } - metaBlock, ok := obj.(*block.MetaBlock) + metaBlock, ok := hdrInfo.hdr.(*block.MetaBlock) if !ok { + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() return nil, process.ErrWrongTypeAssertion } log.Debug(fmt.Sprintf("meta header nonce: %d\n", metaBlock.Nonce)) crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for key := range miniBlockHashes { - _, ok := crossMiniBlockHashes[string(miniBlockHashes[key])] + for hash := range crossMiniBlockHashes { + processedCrossMiniBlocksHashes[hash] = sp.isMiniBlockProcessed([]byte(metaBlockHash), []byte(hash)) + } + + for key, miniBlockHash := range miniBlockHashes { + _, ok = crossMiniBlockHashes[string(miniBlockHash)] if !ok { continue } - metaBlock.SetMiniBlockProcessed(miniBlockHashes[key], true) + processedCrossMiniBlocksHashes[string(miniBlockHash)] = true + delete(miniBlockHashes, key) } log.Debug(fmt.Sprintf("cross mini blocks in meta header: %d\n", len(crossMiniBlockHashes))) processedAll := true - for key := range crossMiniBlockHashes { - if !metaBlock.GetMiniBlockProcessed([]byte(key)) { + for hash := range crossMiniBlockHashes { + if !processedCrossMiniBlocksHashes[hash] { processedAll = false break } @@ -830,18 +941,20 @@ func (sp *shardProcessor) getProcessedMetaBlocksFromHeader(header *block.Header) processedMetaHdrs = append(processedMetaHdrs, metaBlock) } } + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() + + process.SortHeadersByNonce(processedMetaHdrs) return processedMetaHdrs, nil } -func (sp *shardProcessor) removeProcessedMetablocksFromPool(processedMetaHdrs []data.HeaderHandler) error { +func (sp *shardProcessor) removeProcessedMetaBlocksFromPool(processedMetaHdrs []data.HeaderHandler) error { lastNotarizedMetaHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) if err != nil { return err } processed := 0 - unnotarized := len(sp.blocksTracker.UnnotarisedBlocks()) // processedMetaHdrs is also sorted for i := 0; i < len(processedMetaHdrs); i++ { hdr := processedMetaHdrs[i] @@ -851,9 +964,6 @@ func (sp *shardProcessor) removeProcessedMetablocksFromPool(processedMetaHdrs [] continue } - errNotCritical := sp.blocksTracker.RemoveNotarisedBlocks(hdr) - log.LogIfError(errNotCritical) - // metablock was processed and finalized buff, err := sp.marshalizer.Marshal(hdr) if err != nil { @@ -877,6 +987,7 @@ func (sp *shardProcessor) removeProcessedMetablocksFromPool(processedMetaHdrs [] sp.dataPool.MetaBlocks().Remove(headerHash) sp.dataPool.HeadersNonces().Remove(hdr.GetNonce(), sharding.MetachainShardId) + sp.removeAllProcessedMiniBlocks(headerHash) log.Debug(fmt.Sprintf("metaBlock with round %d nonce %d and hash %s has been processed completely and removed from pool\n", hdr.GetRound(), @@ -890,11 +1001,6 @@ func (sp *shardProcessor) 
removeProcessedMetablocksFromPool(processedMetaHdrs [] log.Debug(fmt.Sprintf("%d meta blocks have been processed completely and removed from pool\n", processed)) } - notarized := unnotarized - len(sp.blocksTracker.UnnotarisedBlocks()) - if notarized > 0 { - log.Debug(fmt.Sprintf("%d shard blocks have been notarised by metachain\n", notarized)) - } - return nil } @@ -902,67 +1008,61 @@ func (sp *shardProcessor) removeProcessedMetablocksFromPool(processedMetaHdrs [] // upon receiving, it parses the new metablock and requests miniblocks and transactions // which destination is the current shard func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { - metaBlksCache := sp.dataPool.MetaBlocks() - if metaBlksCache == nil { - return - } - - metaHdrsNoncesCache := sp.dataPool.HeadersNonces() - if metaHdrsNoncesCache == nil && sp.metaBlockFinality > 0 { - return - } - - miniBlksCache := sp.dataPool.MiniBlocks() - if miniBlksCache == nil || miniBlksCache.IsInterfaceNil() { + metaBlockPool := sp.dataPool.MetaBlocks() + if metaBlockPool == nil { return } - obj, ok := metaBlksCache.Peek(metaBlockHash) + obj, ok := metaBlockPool.Peek(metaBlockHash) if !ok { return } - metaBlock, ok := obj.(data.HeaderHandler) + metaBlock, ok := obj.(*block.MetaBlock) if !ok { return } - log.Debug(fmt.Sprintf("received metablock with hash %s and nonce %d from network\n", + log.Debug(fmt.Sprintf("received meta block with hash %s and nonce %d from network\n", core.ToB64(metaBlockHash), - metaBlock.GetNonce())) + metaBlock.Nonce)) - sp.mutRequestedMetaHdrsHashes.Lock() + sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() - if !sp.allNeededMetaHdrsFound { - if sp.requestedMetaHdrsHashes[string(metaBlockHash)] { - delete(sp.requestedMetaHdrsHashes, string(metaBlockHash)) + haveMissingMetaHeaders := sp.hdrsForCurrBlock.missingHdrs > 0 || sp.hdrsForCurrBlock.missingFinalityAttestingHdrs > 0 + if haveMissingMetaHeaders { + hdrInfoForHash := sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaBlockHash)] + receivedMissingMetaHeader := hdrInfoForHash != nil && (hdrInfoForHash.hdr == nil || hdrInfoForHash.hdr.IsInterfaceNil()) + if receivedMissingMetaHeader { + hdrInfoForHash.hdr = metaBlock + sp.hdrsForCurrBlock.missingHdrs-- - if metaBlock.GetNonce() > sp.currHighestMetaHdrNonce { - sp.currHighestMetaHdrNonce = metaBlock.GetNonce() + if metaBlock.Nonce > sp.hdrsForCurrBlock.highestHdrNonce[sharding.MetachainShardId] { + sp.hdrsForCurrBlock.highestHdrNonce[sharding.MetachainShardId] = metaBlock.Nonce } } - lenReqMetaHdrsHashes := len(sp.requestedMetaHdrsHashes) - areFinalAttestingHdrsInCache := false - if lenReqMetaHdrsHashes == 0 { - requestedBlockHeaders := sp.requestFinalMissingHeaders() - if requestedBlockHeaders == 0 { - log.Info(fmt.Sprintf("received all final meta headers\n")) - areFinalAttestingHdrsInCache = true + // all missing meta headers have been received: request any finality attesting meta headers which are still missing + if sp.hdrsForCurrBlock.missingHdrs == 0 { + missingFinalityAttestingMetaHdrs := sp.hdrsForCurrBlock.missingFinalityAttestingHdrs + sp.hdrsForCurrBlock.missingFinalityAttestingHdrs = sp.requestMissingFinalityAttestingHeaders() + if sp.hdrsForCurrBlock.missingFinalityAttestingHdrs == 0 { + log.Info(fmt.Sprintf("received %d missing finality attesting meta headers\n", missingFinalityAttestingMetaHdrs)) } else { - log.Info(fmt.Sprintf("requested %d missing final meta headers\n", requestedBlockHeaders)) + log.Info(fmt.Sprintf("requested %d missing finality attesting meta headers\n", sp.hdrsForCurrBlock.missingFinalityAttestingHdrs)) } } - sp.allNeededMetaHdrsFound = lenReqMetaHdrsHashes == 0 &&
areFinalAttestingHdrsInCache - - sp.mutRequestedMetaHdrsHashes.Unlock() + missingMetaHdrs := sp.hdrsForCurrBlock.missingHdrs + missingFinalityAttestingMetaHdrs := sp.hdrsForCurrBlock.missingFinalityAttestingHdrs + sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - if lenReqMetaHdrsHashes == 0 && areFinalAttestingHdrsInCache { + allMissingMetaHeadersReceived := missingMetaHdrs == 0 && missingFinalityAttestingMetaHdrs == 0 + if allMissingMetaHeadersReceived { sp.chRcvAllMetaHdrs <- true } } else { - sp.mutRequestedMetaHdrsHashes.Unlock() + sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() } lastNotarizedHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) @@ -979,96 +1079,96 @@ func (sp *shardProcessor) receivedMetaBlock(metaBlockHash []byte) { sp.txCoordinator.RequestMiniBlocks(metaBlock) } -// requestFinalMissingHeaders requests the headers needed to accept the current selected headers for processing the +// requestMissingFinalityAttestingHeaders requests the headers needed to accept the current selected headers for processing the // current block. It requests the metaBlockFinality headers greater than the highest meta header related to the block // which should be processed -func (sp *shardProcessor) requestFinalMissingHeaders() uint32 { +func (sp *shardProcessor) requestMissingFinalityAttestingHeaders() uint32 { requestedBlockHeaders := uint32(0) - for i := sp.currHighestMetaHdrNonce + 1; i <= sp.currHighestMetaHdrNonce+uint64(sp.metaBlockFinality); i++ { - if sp.currHighestMetaHdrNonce == uint64(0) { - continue - } + highestHdrNonce := sp.hdrsForCurrBlock.highestHdrNonce[sharding.MetachainShardId] + if highestHdrNonce == uint64(0) { + return requestedBlockHeaders + } - _, _, err := process.GetMetaHeaderFromPoolWithNonce( + lastFinalityAttestingHeader := sp.hdrsForCurrBlock.highestHdrNonce[sharding.MetachainShardId] + uint64(sp.metaBlockFinality) + for i := highestHdrNonce + 1; i <= lastFinalityAttestingHeader; i++ { + metaBlock, metaBlockHash, err := process.GetMetaHeaderFromPoolWithNonce( i, sp.dataPool.MetaBlocks(), sp.dataPool.HeadersNonces()) + if err != nil { requestedBlockHeaders++ go sp.onRequestHeaderHandlerByNonce(sharding.MetachainShardId, i) + continue } + + sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaBlockHash)] = &hdrInfo{hdr: metaBlock, usedInBlock: false} } return requestedBlockHeaders } -func (sp *shardProcessor) requestMetaHeaders(header *block.Header) (uint32, uint32) { +func (sp *shardProcessor) requestMetaHeaders(shardHeader *block.Header) (uint32, uint32) { _ = process.EmptyChannel(sp.chRcvAllMetaHdrs) - sp.mutRequestedMetaHdrsHashes.Lock() - - sp.requestedMetaHdrsHashes = make(map[string]bool) - sp.allNeededMetaHdrsFound = true - - if len(header.MetaBlockHashes) == 0 { - sp.mutRequestedMetaHdrsHashes.Unlock() + if len(shardHeader.MetaBlockHashes) == 0 { return 0, 0 } - missingHeaderHashes := sp.computeMissingHeaders(header) + missingHeadersHashes := sp.computeMissingAndExistingMetaHeaders(shardHeader) - requestedBlockHeaders := uint32(0) - for _, hash := range missingHeaderHashes { - requestedBlockHeaders++ - sp.requestedMetaHdrsHashes[string(hash)] = true + sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() + for _, hash := range missingHeadersHashes { + sp.hdrsForCurrBlock.hdrHashAndInfo[string(hash)] = &hdrInfo{hdr: nil, usedInBlock: true} go sp.onRequestHeaderHandler(sharding.MetachainShardId, hash) } - requestedFinalBlockHeaders := uint32(0) - if requestedBlockHeaders > 0 { - sp.allNeededMetaHdrsFound = false - } else { - requestedFinalBlockHeaders = 
sp.requestFinalMissingHeaders() - if requestedFinalBlockHeaders > 0 { - sp.allNeededMetaHdrsFound = false - } + if sp.hdrsForCurrBlock.missingHdrs == 0 { + sp.hdrsForCurrBlock.missingFinalityAttestingHdrs = sp.requestMissingFinalityAttestingHeaders() } - sp.mutRequestedMetaHdrsHashes.Unlock() + requestedHdrs := sp.hdrsForCurrBlock.missingHdrs + requestedFinalityAttestingHdrs := sp.hdrsForCurrBlock.missingFinalityAttestingHdrs + sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - return requestedBlockHeaders, requestedFinalBlockHeaders + return requestedHdrs, requestedFinalityAttestingHdrs } -func (sp *shardProcessor) computeMissingHeaders(header *block.Header) [][]byte { - missingHeaders := make([][]byte, 0) - sp.currHighestMetaHdrNonce = uint64(0) +func (sp *shardProcessor) computeMissingAndExistingMetaHeaders(header *block.Header) [][]byte { + missingHeadersHashes := make([][]byte, 0) + sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() for i := 0; i < len(header.MetaBlockHashes); i++ { hdr, err := process.GetMetaHeaderFromPool( header.MetaBlockHashes[i], sp.dataPool.MetaBlocks()) + if err != nil { - missingHeaders = append(missingHeaders, header.MetaBlockHashes[i]) + missingHeadersHashes = append(missingHeadersHashes, header.MetaBlockHashes[i]) + sp.hdrsForCurrBlock.missingHdrs++ continue } - if hdr.Nonce > sp.currHighestMetaHdrNonce { - sp.currHighestMetaHdrNonce = hdr.Nonce + sp.hdrsForCurrBlock.hdrHashAndInfo[string(header.MetaBlockHashes[i])] = &hdrInfo{hdr: hdr, usedInBlock: true} + + if hdr.Nonce > sp.hdrsForCurrBlock.highestHdrNonce[sharding.MetachainShardId] { + sp.hdrsForCurrBlock.highestHdrNonce[sharding.MetachainShardId] = hdr.Nonce } } + sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - return missingHeaders + return missingHeadersHashes } -func (sp *shardProcessor) verifyCrossShardMiniBlockDstMe(hdr *block.Header) error { - mMiniBlockMeta, err := sp.getAllMiniBlockDstMeFromMeta(hdr.Round, hdr.MetaBlockHashes) +func (sp *shardProcessor) verifyCrossShardMiniBlockDstMe(header *block.Header) error { + miniBlockMetaHashes, err := sp.getAllMiniBlockDstMeFromMeta(header) if err != nil { return err } - miniBlockDstMe := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for mbHash := range miniBlockDstMe { - if _, ok := mMiniBlockMeta[mbHash]; !ok { + crossMiniBlockHashes := header.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) + for hash := range crossMiniBlockHashes { + if _, ok := miniBlockMetaHashes[hash]; !ok { return process.ErrCrossShardMBWithoutConfirmationFromMeta } } @@ -1076,51 +1176,47 @@ func (sp *shardProcessor) verifyCrossShardMiniBlockDstMe(hdr *block.Header) erro return nil } -func (sp *shardProcessor) getAllMiniBlockDstMeFromMeta(round uint64, metaHashes [][]byte) (map[string][]byte, error) { - metaBlockCache := sp.dataPool.MetaBlocks() - if metaBlockCache == nil { - return nil, process.ErrNilMetaBlockPool - } - +func (sp *shardProcessor) getAllMiniBlockDstMeFromMeta(header *block.Header) (map[string][]byte, error) { lastHdr, err := sp.getLastNotarizedHdr(sharding.MetachainShardId) if err != nil { return nil, err } - mMiniBlockMeta := make(map[string][]byte) - for _, metaHash := range metaHashes { - val, _ := metaBlockCache.Peek(metaHash) - if val == nil { + miniBlockMetaHashes := make(map[string][]byte) + + sp.hdrsForCurrBlock.mutHdrsForBlock.RLock() + for _, metaBlockHash := range header.MetaBlockHashes { + hdrInfo, ok := sp.hdrsForCurrBlock.hdrHashAndInfo[string(metaBlockHash)] + if !ok { continue } - - hdr, ok := val.(*block.MetaBlock) + metaBlock, ok := 
hdrInfo.hdr.(*block.MetaBlock) if !ok { continue } - - if hdr.GetRound() > round { + if metaBlock.GetRound() > header.Round { continue } - if hdr.GetRound() <= lastHdr.GetRound() { + if metaBlock.GetRound() <= lastHdr.GetRound() { continue } - if hdr.GetNonce() <= lastHdr.GetNonce() { + if metaBlock.GetNonce() <= lastHdr.GetNonce() { continue } - miniBlockDstMe := hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) - for mbHash := range miniBlockDstMe { - mMiniBlockMeta[mbHash] = metaHash + crossMiniBlockHashes := metaBlock.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId()) + for hash := range crossMiniBlockHashes { + miniBlockMetaHashes[hash] = []byte(metaBlockHash) } } + sp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() - return mMiniBlockMeta, nil + return miniBlockMetaHashes, nil } func (sp *shardProcessor) getOrderedMetaBlocks(round uint64) ([]*hashAndHdr, error) { - metaBlockCache := sp.dataPool.MetaBlocks() - if metaBlockCache == nil { + metaBlocksPool := sp.dataPool.MetaBlocks() + if metaBlocksPool == nil { return nil, process.ErrNilMetaBlockPool } @@ -1130,8 +1226,8 @@ func (sp *shardProcessor) getOrderedMetaBlocks(round uint64) ([]*hashAndHdr, err } orderedMetaBlocks := make([]*hashAndHdr, 0) - for _, key := range metaBlockCache.Keys() { - val, _ := metaBlockCache.Peek(key) + for _, key := range metaBlocksPool.Keys() { + val, _ := metaBlocksPool.Peek(key) if val == nil { continue } @@ -1154,9 +1250,11 @@ func (sp *shardProcessor) getOrderedMetaBlocks(round uint64) ([]*hashAndHdr, err orderedMetaBlocks = append(orderedMetaBlocks, &hashAndHdr{hdr: hdr, hash: key}) } - sort.Slice(orderedMetaBlocks, func(i, j int) bool { - return orderedMetaBlocks[i].hdr.GetNonce() < orderedMetaBlocks[j].hdr.GetNonce() - }) + if len(orderedMetaBlocks) > 1 { + sort.Slice(orderedMetaBlocks, func(i, j int) bool { + return orderedMetaBlocks[i].hdr.GetNonce() < orderedMetaBlocks[j].hdr.GetNonce() + }) + } return orderedMetaBlocks, nil } @@ -1172,7 +1270,7 @@ func (sp *shardProcessor) isMetaHeaderFinal(currHdr data.HeaderHandler, sortedHd // verify if there are "K" block after current to make this one final lastVerifiedHdr := currHdr - nextBlocksVerified := 0 + nextBlocksVerified := uint32(0) for i := startPos; i < len(sortedHdrs); i++ { if nextBlocksVerified >= sp.metaBlockFinality { @@ -1201,51 +1299,41 @@ func (sp *shardProcessor) isMetaHeaderFinal(currHdr data.HeaderHandler, sortedHd // full verification through metachain header func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( - noShards uint32, maxItemsInBlock uint32, round uint64, haveTime func() bool, -) (block.MiniBlockSlice, [][]byte, uint32, error) { - - metaBlockCache := sp.dataPool.MetaBlocks() - if metaBlockCache == nil || metaBlockCache.IsInterfaceNil() { - return nil, nil, 0, process.ErrNilMetaBlockPool - } - - miniBlockCache := sp.dataPool.MiniBlocks() - if miniBlockCache == nil || miniBlockCache.IsInterfaceNil() { - return nil, nil, 0, process.ErrNilMiniBlockPool - } - - txPool := sp.dataPool.Transactions() - if txPool == nil || txPool.IsInterfaceNil() { - return nil, nil, 0, process.ErrNilTransactionPool - } +) (block.MiniBlockSlice, uint32, uint32, error) { miniBlocks := make(block.MiniBlockSlice, 0) - nrTxAdded := uint32(0) + txsAdded := uint32(0) + hdrsAdded := uint32(0) orderedMetaBlocks, err := sp.getOrderedMetaBlocks(round) if err != nil { - return nil, nil, 0, err + return nil, 0, 0, err } log.Info(fmt.Sprintf("meta blocks ordered: %d\n", len(orderedMetaBlocks))) lastMetaHdr, err := 
sp.getLastNotarizedHdr(sharding.MetachainShardId) if err != nil { - return nil, nil, 0, err + return nil, 0, 0, err } // do processing in order - usedMetaHdrsHashes := make([][]byte, 0) + sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() for i := 0; i < len(orderedMetaBlocks); i++ { if !haveTime() { - log.Info(fmt.Sprintf("time is up after putting %d cross txs with destination to current shard\n", nrTxAdded)) + log.Info(fmt.Sprintf("time is up after putting %d cross txs with destination to current shard\n", txsAdded)) break } - itemsAddedInHeader := uint32(len(usedMetaHdrsHashes) + len(miniBlocks)) + if len(miniBlocks) >= core.MaxMiniBlocksInBlock { + log.Info(fmt.Sprintf("%d max number of mini blocks allowed to be added in one shard block has been reached\n", len(miniBlocks))) + break + } + + itemsAddedInHeader := uint32(len(sp.hdrsForCurrBlock.hdrHashAndInfo) + len(miniBlocks)) if itemsAddedInHeader >= maxItemsInBlock { log.Info(fmt.Sprintf("%d max records allowed to be added in shard header has been reached\n", maxItemsInBlock)) break @@ -1256,7 +1344,7 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( continue } - err := sp.isHdrConstructionValid(hdr, lastMetaHdr) + err = sp.isHdrConstructionValid(hdr, lastMetaHdr) if err != nil { continue } @@ -1267,22 +1355,28 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( } if len(hdr.GetMiniBlockHeadersWithDst(sp.shardCoordinator.SelfId())) == 0 { - usedMetaHdrsHashes = append(usedMetaHdrsHashes, orderedMetaBlocks[i].hash) + sp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedMetaBlocks[i].hash)] = &hdrInfo{hdr: hdr, usedInBlock: true} + hdrsAdded++ lastMetaHdr = hdr continue } - itemsAddedInBody := nrTxAdded + itemsAddedInBody := txsAdded if itemsAddedInBody >= maxItemsInBlock { continue } maxTxSpaceRemained := int32(maxItemsInBlock) - int32(itemsAddedInBody) - maxMbSpaceRemained := int32(maxItemsInBlock) - int32(itemsAddedInHeader) - 1 + maxMbSpaceRemained := sp.getMaxMiniBlocksSpaceRemained( + maxItemsInBlock, + itemsAddedInHeader+1, + uint32(len(miniBlocks))) if maxTxSpaceRemained > 0 && maxMbSpaceRemained > 0 { + processedMiniBlocksHashes := sp.getProcessedMiniBlocksHashes(orderedMetaBlocks[i].hash) currMBProcessed, currTxsAdded, hdrProcessFinished := sp.txCoordinator.CreateMbsAndProcessCrossShardTransactionsDstMe( hdr, + processedMiniBlocksHashes, uint32(maxTxSpaceRemained), uint32(maxMbSpaceRemained), round, @@ -1290,10 +1384,11 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( // all txs processed, add to processed miniblocks miniBlocks = append(miniBlocks, currMBProcessed...) 
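// The hunks above replace the old round-keyed usedMetaHdrsHashes map with a single
// hdrsForCurrBlock structure that is reset for every block under construction. Below is a
// minimal, self-contained sketch of that bookkeeping; hdrInfo and the hdrForBlock fields
// mirror the names in the diff, while the tiny header interface, metaHdr and main are
// assumptions added only to keep the example runnable.
package main

import (
	"fmt"
	"sync"
)

type headerHandler interface {
	GetNonce() uint64
}

type hdrInfo struct {
	hdr         headerHandler
	usedInBlock bool
}

type hdrForBlock struct {
	missingHdrs                  uint32
	missingFinalityAttestingHdrs uint32
	highestHdrNonce              map[uint32]uint64
	mutHdrsForBlock              sync.RWMutex
	hdrHashAndInfo               map[string]*hdrInfo
}

type metaHdr struct{ nonce uint64 }

func (m metaHdr) GetNonce() uint64 { return m.nonce }

const metachainShardId = uint32(0xFFFFFFFF)

// addHdrForCurrentBlock records a meta header referenced by the block under construction
// and keeps track of the highest meta nonce seen so far, under the same lock.
func (hfb *hdrForBlock) addHdrForCurrentBlock(hash []byte, hdr headerHandler, usedInBlock bool) {
	hfb.mutHdrsForBlock.Lock()
	defer hfb.mutHdrsForBlock.Unlock()

	hfb.hdrHashAndInfo[string(hash)] = &hdrInfo{hdr: hdr, usedInBlock: usedInBlock}
	if hdr.GetNonce() > hfb.highestHdrNonce[metachainShardId] {
		hfb.highestHdrNonce[metachainShardId] = hdr.GetNonce()
	}
}

func main() {
	hfb := &hdrForBlock{
		highestHdrNonce: make(map[uint32]uint64),
		hdrHashAndInfo:  make(map[string]*hdrInfo),
	}

	hfb.addHdrForCurrentBlock([]byte("meta hash 1"), metaHdr{nonce: 1}, true)
	hfb.addHdrForCurrentBlock([]byte("meta hash 2"), metaHdr{nonce: 2}, true)

	fmt.Println("headers tracked:", len(hfb.hdrHashAndInfo))
	fmt.Println("highest meta nonce:", hfb.highestHdrNonce[metachainShardId])
}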
- nrTxAdded = nrTxAdded + currTxsAdded + txsAdded = txsAdded + currTxsAdded if currTxsAdded > 0 { - usedMetaHdrsHashes = append(usedMetaHdrsHashes, orderedMetaBlocks[i].hash) + sp.hdrsForCurrBlock.hdrHashAndInfo[string(orderedMetaBlocks[i].hash)] = &hdrInfo{hdr: hdr, usedInBlock: true} + hdrsAdded++ } if !hdrProcessFinished { @@ -1303,16 +1398,12 @@ func (sp *shardProcessor) createAndProcessCrossMiniBlocksDstMe( lastMetaHdr = hdr } } + sp.hdrsForCurrBlock.mutHdrsForBlock.Unlock() - sp.mutUsedMetaHdrsHashes.Lock() - sp.usedMetaHdrsHashes[round] = usedMetaHdrsHashes - sp.mutUsedMetaHdrsHashes.Unlock() - - return miniBlocks, usedMetaHdrsHashes, nrTxAdded, nil + return miniBlocks, txsAdded, hdrsAdded, nil } func (sp *shardProcessor) createMiniBlocks( - noShards uint32, maxItemsInBlock uint32, round uint64, haveTime func() bool, @@ -1334,35 +1425,41 @@ func (sp *shardProcessor) createMiniBlocks( return nil, process.ErrNilTransactionPool } - destMeMiniBlocks, usedMetaHdrsHashes, txs, err := sp.createAndProcessCrossMiniBlocksDstMe(noShards, maxItemsInBlock, round, haveTime) + destMeMiniBlocks, nbTxs, nbHdrs, err := sp.createAndProcessCrossMiniBlocksDstMe(maxItemsInBlock, round, haveTime) if err != nil { log.Info(err.Error()) } - log.Debug(fmt.Sprintf("processed %d miniblocks and %d txs with destination in self shard\n", len(destMeMiniBlocks), txs)) + processedMetaHdrs, errNotCritical := sp.getOrderedProcessedMetaBlocksFromMiniBlocks(destMeMiniBlocks) + if errNotCritical != nil { + log.Debug(errNotCritical.Error()) + } + + err = sp.setMetaConsensusData(processedMetaHdrs) + if err != nil { + return nil, err + } + + log.Info(fmt.Sprintf("processed %d miniblocks and %d txs with destination in self shard\n", len(destMeMiniBlocks), nbTxs)) if len(destMeMiniBlocks) > 0 { miniBlocks = append(miniBlocks, destMeMiniBlocks...) } - if !haveTime() { - log.Info(fmt.Sprintf("time is up added %d transactions\n", txs)) - return miniBlocks, nil - } - - maxTxSpaceRemained := int32(maxItemsInBlock) - int32(txs) - maxMbSpaceRemained := int32(maxItemsInBlock) - int32(len(destMeMiniBlocks)) - int32(len(usedMetaHdrsHashes)) + maxTxSpaceRemained := int32(maxItemsInBlock) - int32(nbTxs) + maxMbSpaceRemained := sp.getMaxMiniBlocksSpaceRemained( + maxItemsInBlock, + uint32(len(destMeMiniBlocks))+nbHdrs, + uint32(len(miniBlocks))) - if maxTxSpaceRemained > 0 && maxMbSpaceRemained > 0 { - mbFromMe := sp.txCoordinator.CreateMbsAndProcessTransactionsFromMe( - uint32(maxTxSpaceRemained), - uint32(maxMbSpaceRemained), - round, - haveTime) + mbFromMe := sp.txCoordinator.CreateMbsAndProcessTransactionsFromMe( + uint32(maxTxSpaceRemained), + uint32(maxMbSpaceRemained), + round, + haveTime) - if len(mbFromMe) > 0 { - miniBlocks = append(miniBlocks, mbFromMe...) - } + if len(mbFromMe) > 0 { + miniBlocks = append(miniBlocks, mbFromMe...) 
} log.Info(fmt.Sprintf("creating mini blocks has been finished: created %d mini blocks\n", len(miniBlocks))) @@ -1393,20 +1490,20 @@ func (sp *shardProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round return nil, process.ErrWrongTypeAssertion } - mbLen := len(body) totalTxCount := 0 - miniBlockHeaders := make([]block.MiniBlockHeader, mbLen) - for i := 0; i < mbLen; i++ { + miniBlockHeaders := make([]block.MiniBlockHeader, len(body)) + + for i := 0; i < len(body); i++ { txCount := len(body[i].TxHashes) totalTxCount += txCount - mbBytes, err := sp.marshalizer.Marshal(body[i]) + + miniBlockHash, err := core.CalculateHash(sp.marshalizer, sp.hasher, body[i]) if err != nil { return nil, err } - mbHash := sp.hasher.Compute(string(mbBytes)) miniBlockHeaders[i] = block.MiniBlockHeader{ - Hash: mbHash, + Hash: miniBlockHash, SenderShardID: body[i].SenderShardID, ReceiverShardID: body[i].ReceiverShardID, TxCount: uint32(txCount), @@ -1416,22 +1513,15 @@ func (sp *shardProcessor) CreateBlockHeader(bodyHandler data.BodyHandler, round header.MiniBlockHeaders = miniBlockHeaders header.TxCount = uint32(totalTxCount) + metaBlockHashes := sp.sortHeaderHashesForCurrentBlockByNonce(true) + header.MetaBlockHashes = metaBlockHashes[sharding.MetachainShardId] sp.appStatusHandler.SetUInt64Value(core.MetricNumTxInBlock, uint64(totalTxCount)) - sp.appStatusHandler.SetUInt64Value(core.MetricNumMiniBlocks, uint64(mbLen)) - - sp.mutUsedMetaHdrsHashes.Lock() - - if usedMetaHdrsHashes, ok := sp.usedMetaHdrsHashes[round]; ok { - header.MetaBlockHashes = usedMetaHdrsHashes - delete(sp.usedMetaHdrsHashes, round) - } - - sp.mutUsedMetaHdrsHashes.Unlock() + sp.appStatusHandler.SetUInt64Value(core.MetricNumMiniBlocks, uint64(len(body))) sp.blockSizeThrottler.Add( round, - core.Max(header.ItemsInBody(), header.ItemsInHeader())) + core.MaxUint32(header.ItemsInBody(), header.ItemsInHeader())) return header, nil } @@ -1516,3 +1606,69 @@ func (sp *shardProcessor) IsInterfaceNil() bool { } return false } + +func (sp *shardProcessor) addProcessedMiniBlock(metaBlockHash []byte, miniBlockHash []byte) { + sp.mutProcessedMiniBlocks.Lock() + miniBlocksProcessed, ok := sp.processedMiniBlocks[string(metaBlockHash)] + if !ok { + miniBlocksProcessed := make(map[string]struct{}) + miniBlocksProcessed[string(miniBlockHash)] = struct{}{} + sp.processedMiniBlocks[string(metaBlockHash)] = miniBlocksProcessed + sp.mutProcessedMiniBlocks.Unlock() + return + } + + miniBlocksProcessed[string(miniBlockHash)] = struct{}{} + sp.mutProcessedMiniBlocks.Unlock() +} + +func (sp *shardProcessor) removeProcessedMiniBlock(miniBlockHash []byte) { + sp.mutProcessedMiniBlocks.Lock() + for _, miniBlocksProcessed := range sp.processedMiniBlocks { + _, isProcessed := miniBlocksProcessed[string(miniBlockHash)] + if isProcessed { + delete(miniBlocksProcessed, string(miniBlockHash)) + } + } + sp.mutProcessedMiniBlocks.Unlock() +} + +func (sp *shardProcessor) removeAllProcessedMiniBlocks(metaBlockHash []byte) { + sp.mutProcessedMiniBlocks.Lock() + delete(sp.processedMiniBlocks, string(metaBlockHash)) + sp.mutProcessedMiniBlocks.Unlock() +} + +func (sp *shardProcessor) getProcessedMiniBlocksHashes(metaBlockHash []byte) map[string]struct{} { + sp.mutProcessedMiniBlocks.RLock() + processedMiniBlocksHashes := sp.processedMiniBlocks[string(metaBlockHash)] + sp.mutProcessedMiniBlocks.RUnlock() + + return processedMiniBlocksHashes +} + +func (sp *shardProcessor) isMiniBlockProcessed(metaBlockHash []byte, miniBlockHash []byte) bool { + 
sp.mutProcessedMiniBlocks.RLock() + miniBlocksProcessed, ok := sp.processedMiniBlocks[string(metaBlockHash)] + if !ok { + sp.mutProcessedMiniBlocks.RUnlock() + return false + } + + _, isProcessed := miniBlocksProcessed[string(miniBlockHash)] + sp.mutProcessedMiniBlocks.RUnlock() + + return isProcessed +} + +func (sp *shardProcessor) getMaxMiniBlocksSpaceRemained( + maxItemsInBlock uint32, + itemsAddedInBlock uint32, + miniBlocksAddedInBlock uint32, +) int32 { + mbSpaceRemainedInBlock := int32(maxItemsInBlock) - int32(itemsAddedInBlock) + mbSpaceRemainedInCache := int32(core.MaxMiniBlocksInBlock) - int32(miniBlocksAddedInBlock) + maxMbSpaceRemained := core.MinInt32(mbSpaceRemainedInBlock, mbSpaceRemainedInCache) + + return maxMbSpaceRemained +} diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 5116eb7bd3e..afa2f254b5f 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -42,14 +42,16 @@ func initAccountsMock() *mock.AccountsStub { } } -func initBasicTestData() (*mock.PoolsHolderFake, *blockchain.BlockChain, []byte, block.Body, [][]byte, *mock.HasherMock, *mock.MarshalizerMock, error, []byte) { - tdp := mock.NewPoolsHolderFake() +func initBasicTestData() (*mock.PoolsHolderMock, *blockchain.BlockChain, []byte, block.Body, [][]byte, *mock.HasherMock, *mock.MarshalizerMock, error, []byte) { + tdp := mock.NewPoolsHolderMock() txHash := []byte("tx_hash1") + randSeed := []byte("rand seed") tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ - Round: 1, - Nonce: 1, + Round: 1, + Nonce: 1, + RandSeed: randSeed, }, } rootHash := []byte("rootHash") @@ -69,11 +71,12 @@ func initBasicTestData() (*mock.PoolsHolderFake, *blockchain.BlockChain, []byte, return tdp, blkc, rootHash, body, txHashes, hasher, marshalizer, nil, mbHash } -func initBlockHeader(prevHash []byte, rootHash []byte, mbHdrs []block.MiniBlockHeader) block.Header { +func initBlockHeader(prevHash []byte, prevRandSeed []byte, rootHash []byte, mbHdrs []block.MiniBlockHeader) block.Header { hdr := block.Header{ Nonce: 2, Round: 2, PrevHash: prevHash, + PrevRandSeed: prevRandSeed, Signature: []byte("signature"), PubKeysBitmap: []byte("00110"), ShardId: 0, @@ -172,17 +175,6 @@ func TestNewShardProcessor_NilForkDetectorShouldErr(t *testing.T) { assert.Nil(t, sp) } -func TestNewShardProcessor_NilBlocksTrackerShouldErr(t *testing.T) { - t.Parallel() - - arguments := CreateMockArguments() - arguments.BlocksTracker = nil - sp, err := blproc.NewShardProcessor(arguments) - - assert.Equal(t, process.ErrNilBlocksTracker, err) - assert.Nil(t, sp) -} - func TestNewShardProcessor_NilRequestTransactionHandlerShouldErr(t *testing.T) { t.Parallel() @@ -296,6 +288,7 @@ func TestShardProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { Nonce: 1, PubKeysBitmap: []byte("0100101"), PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), Signature: []byte("signature"), RootHash: []byte("roothash"), } @@ -310,7 +303,7 @@ func TestShardProcessor_ProcessWithDirtyAccountShouldErr(t *testing.T) { err := sp.ProcessBlock(blkc, &hdr, body, haveTime) assert.NotNil(t, err) - assert.Equal(t, err, process.ErrAccountStateDirty) + assert.Equal(t, process.ErrAccountStateDirty, err) } func TestShardProcessor_ProcessBlockHeaderBodyMismatchShouldErr(t *testing.T) { @@ -321,6 +314,7 @@ func TestShardProcessor_ProcessBlockHeaderBodyMismatchShouldErr(t *testing.T) { hdr := block.Header{ Nonce: 
1, PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), Signature: []byte("signature"), PubKeysBitmap: []byte("00110"), ShardId: 0, @@ -395,6 +389,7 @@ func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T hdr := block.Header{ Nonce: 1, PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), Signature: []byte("signature"), PubKeysBitmap: []byte("00110"), ShardId: 0, @@ -429,6 +424,9 @@ func TestShardProcessor_ProcessBlockWithInvalidTransactionShouldErr(t *testing.T }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) container, _ := factory.Create() @@ -467,6 +465,7 @@ func TestShardProcessor_ProcessWithHeaderNotFirstShouldErr(t *testing.T) { Round: 1, PubKeysBitmap: []byte("0100101"), PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), Signature: []byte("signature"), RootHash: []byte("root hash"), } @@ -486,6 +485,7 @@ func TestShardProcessor_ProcessWithHeaderNotCorrectNonceShouldErr(t *testing.T) Round: 1, PubKeysBitmap: []byte("0100101"), PrevHash: []byte(""), + PrevRandSeed: []byte("rand seed"), Signature: []byte("signature"), RootHash: []byte("root hash"), } @@ -508,19 +508,23 @@ func TestShardProcessor_ProcessWithHeaderNotCorrectPrevHashShouldErr(t *testing. return 0 }, } + + randSeed := []byte("rand seed") sp, _ := blproc.NewShardProcessor(arguments) hdr := &block.Header{ Nonce: 1, Round: 1, PubKeysBitmap: []byte("0100101"), PrevHash: []byte("zzz"), + PrevRandSeed: randSeed, Signature: []byte("signature"), RootHash: []byte("root hash"), } body := make(block.Body, 0) blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ - Nonce: 0, + Nonce: 0, + RandSeed: randSeed, }, } err := sp.ProcessBlock(blkc, hdr, body, haveTime) @@ -531,9 +535,11 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR t.Parallel() tdp := initDataPool([]byte("tx_hash1")) txHash := []byte("tx_hash1") + randSeed := []byte("rand seed") blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ - Nonce: 0, + Nonce: 0, + RandSeed: randSeed, }, } body := make(block.Body, 0) @@ -563,6 +569,7 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR Round: 1, Nonce: 1, PrevHash: []byte(""), + PrevRandSeed: randSeed, Signature: []byte("signature"), PubKeysBitmap: []byte("00110"), ShardId: 0, @@ -606,6 +613,9 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR tpm, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) container, _ := factory.Create() @@ -643,11 +653,14 @@ func TestShardProcessor_ProcessBlockWithErrOnProcessBlockTransactionsCallShouldR func TestShardProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertState(t *testing.T) { t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + randSeed := []byte("rand seed") txHash := []byte("tx_hash1") blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ - Nonce: 0, + Nonce: 0, + RandSeed: randSeed, }, } body := make(block.Body, 0) @@ -677,6 +690,7 @@ func TestShardProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertStat Round: 1, Nonce: 1, PrevHash: []byte(""), + PrevRandSeed: randSeed, Signature: []byte("signature"), PubKeysBitmap: []byte("00110"), ShardId: 0, @@ -714,17 +728,20 @@ func 
TestShardProcessor_ProcessBlockWithErrOnVerifyStateRootCallShouldRevertStat sp, _ := blproc.NewShardProcessor(arguments) // should return err err := sp.ProcessBlock(blkc, &hdr, body, haveTime) - assert.Equal(t, process.ErrRootStateMissmatch, err) + assert.Equal(t, process.ErrRootStateDoesNotMatch, err) assert.True(t, wasCalled) } func TestShardProcessor_ProcessBlockOnlyIntraShardShouldPass(t *testing.T) { t.Parallel() + tdp := initDataPool([]byte("tx_hash1")) + randSeed := []byte("rand seed") txHash := []byte("tx_hash1") blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ - Nonce: 0, + Nonce: 0, + RandSeed: randSeed, }, } rootHash := []byte("rootHash") @@ -755,6 +772,7 @@ func TestShardProcessor_ProcessBlockOnlyIntraShardShouldPass(t *testing.T) { Round: 1, Nonce: 1, PrevHash: []byte(""), + PrevRandSeed: randSeed, Signature: []byte("signature"), PubKeysBitmap: []byte("00110"), ShardId: 0, @@ -789,11 +807,14 @@ func TestShardProcessor_ProcessBlockOnlyIntraShardShouldPass(t *testing.T) { func TestShardProcessor_ProcessBlockCrossShardWithoutMetaShouldFail(t *testing.T) { t.Parallel() + + randSeed := []byte("rand seed") tdp := initDataPool([]byte("tx_hash1")) txHash := []byte("tx_hash1") blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ - Nonce: 0, + Nonce: 0, + RandSeed: randSeed, }, } rootHash := []byte("rootHash") @@ -828,6 +849,7 @@ func TestShardProcessor_ProcessBlockCrossShardWithoutMetaShouldFail(t *testing.T Round: 1, Nonce: 1, PrevHash: []byte(""), + PrevRandSeed: randSeed, Signature: []byte("signature"), PubKeysBitmap: []byte("00110"), ShardId: 0, @@ -871,9 +893,10 @@ func TestShardProcessor_ProcessBlockCrossShardWithMetaShouldPass(t *testing.T) { mbHdrs := make([]block.MiniBlockHeader, 0) mbHdrs = append(mbHdrs, mbHdr) + randSeed := []byte("rand seed") lastHdr := blkc.GetCurrentBlockHeader() prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) shardMiniBlock := block.ShardMiniBlockHeader{ ReceiverShardId: mbHdr.ReceiverShardID, @@ -892,6 +915,7 @@ func TestShardProcessor_ProcessBlockCrossShardWithMetaShouldPass(t *testing.T) { meta := block.MetaBlock{ Nonce: 1, ShardInfo: shardHdrs, + RandSeed: randSeed, } metaBytes, _ := marshalizer.Marshal(meta) metaHash := hasher.Compute(string(metaBytes)) @@ -899,8 +923,9 @@ func TestShardProcessor_ProcessBlockCrossShardWithMetaShouldPass(t *testing.T) { tdp.MetaBlocks().Put(metaHash, meta) meta = block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + PrevRandSeed: randSeed, } metaBytes, _ = marshalizer.Marshal(meta) metaHash = hasher.Compute(string(metaBytes)) @@ -939,9 +964,11 @@ func TestShardProcessor_ProcessBlockHaveTimeLessThanZeroShouldErr(t *testing.T) txHash := []byte("tx_hash1") tdp := initDataPool(txHash) + randSeed := []byte("rand seed") blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ - Nonce: 1, + Nonce: 1, + RandSeed: randSeed, }, } rootHash := []byte("rootHash") @@ -974,6 +1001,7 @@ func TestShardProcessor_ProcessBlockHaveTimeLessThanZeroShouldErr(t *testing.T) Round: 2, Nonce: 2, PrevHash: preHash, + PrevRandSeed: randSeed, Signature: []byte("signature"), PubKeysBitmap: []byte("00110"), ShardId: 0, @@ -1003,9 +1031,10 @@ func TestShardProcessor_ProcessBlockWithMissingMetaHdrShouldErr(t *testing.T) { mbHdrs := make([]block.MiniBlockHeader, 0) mbHdrs = append(mbHdrs, mbHdr) + randSeed := 
[]byte("rand seed") lastHdr := blkc.GetCurrentBlockHeader() prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) shardMiniBlock := block.ShardMiniBlockHeader{ ReceiverShardId: mbHdr.ReceiverShardID, @@ -1024,6 +1053,7 @@ func TestShardProcessor_ProcessBlockWithMissingMetaHdrShouldErr(t *testing.T) { meta := block.MetaBlock{ Nonce: 1, ShardInfo: shardHdrs, + RandSeed: randSeed, } metaBytes, _ := marshalizer.Marshal(meta) metaHash := hasher.Compute(string(metaBytes)) @@ -1032,8 +1062,9 @@ func TestShardProcessor_ProcessBlockWithMissingMetaHdrShouldErr(t *testing.T) { tdp.MetaBlocks().Put(metaHash, meta) meta = block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + PrevRandSeed: randSeed, } metaBytes, _ = marshalizer.Marshal(meta) metaHash = hasher.Compute(string(metaBytes)) @@ -1070,9 +1101,11 @@ func TestShardProcessor_ProcessBlockWithWrongMiniBlockHeaderShouldErr(t *testing txHash := []byte("tx_hash1") tdp := initDataPool(txHash) + randSeed := []byte("rand seed") blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ - Nonce: 1, + Nonce: 1, + RandSeed: randSeed, }, } rootHash := []byte("rootHash") @@ -1102,7 +1135,7 @@ func TestShardProcessor_ProcessBlockWithWrongMiniBlockHeaderShouldErr(t *testing lastHdr := blkc.GetCurrentBlockHeader() prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) rootHashCalled := func() ([]byte, error) { return rootHash, nil @@ -1129,13 +1162,16 @@ func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing. ReceiverShardID: 0, SenderShardID: 1, TxCount: uint32(len(txHashes)), - Hash: mbHash} + Hash: mbHash, + } mbHdrs := make([]block.MiniBlockHeader, 0) mbHdrs = append(mbHdrs, mbHdr) lastHdr := blkc.GetCurrentBlockHeader() prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) + randSeed := []byte("rand seed") + + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) shardMiniBlock := block.ShardMiniBlockHeader{ ReceiverShardId: mbHdr.ReceiverShardID, @@ -1155,6 +1191,7 @@ func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing. Nonce: 1, ShardInfo: shardHdrs, Round: 1, + RandSeed: randSeed, } metaBytes, _ := marshalizer.Marshal(meta) metaHash := hasher.Compute(string(metaBytes)) @@ -1163,9 +1200,10 @@ func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing. tdp.MetaBlocks().Put(metaHash, meta) meta = &block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - Round: 2, + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + Round: 2, + PrevRandSeed: randSeed, } metaBytes, _ = marshalizer.Marshal(meta) metaHash = hasher.Compute(string(metaBytes)) @@ -1209,12 +1247,14 @@ func TestShardProcessor_CheckAndRequestIfMetaHeadersMissingShouldErr(t *testing. 
func TestShardProcessor_IsMetaHeaderFinalShouldPass(t *testing.T) { t.Parallel() - tdp := mock.NewPoolsHolderFake() + tdp := mock.NewPoolsHolderMock() txHash := []byte("tx_hash1") + randSeed := []byte("rand seed") tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) blkc := &blockchain.BlockChain{ CurrentBlockHeader: &block.Header{ - Nonce: 1, + Nonce: 1, + RandSeed: randSeed, }, } rootHash := []byte("rootHash") @@ -1244,7 +1284,7 @@ func TestShardProcessor_IsMetaHeaderFinalShouldPass(t *testing.T) { lastHdr := blkc.GetCurrentBlockHeader() prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) shardMiniBlock := block.ShardMiniBlockHeader{ ReceiverShardId: mbHdr.ReceiverShardID, @@ -1264,6 +1304,7 @@ func TestShardProcessor_IsMetaHeaderFinalShouldPass(t *testing.T) { Nonce: 1, ShardInfo: shardHdrs, Round: 1, + RandSeed: randSeed, } metaBytes, _ := marshalizer.Marshal(meta) metaHash := hasher.Compute(string(metaBytes)) @@ -1272,10 +1313,11 @@ func TestShardProcessor_IsMetaHeaderFinalShouldPass(t *testing.T) { tdp.MetaBlocks().Put(metaHash, meta) meta = &block.MetaBlock{ - Nonce: 2, - ShardInfo: make([]block.ShardData, 0), - Round: 2, - PrevHash: metaHash, + Nonce: 2, + ShardInfo: make([]block.ShardData, 0), + Round: 2, + PrevHash: metaHash, + PrevRandSeed: randSeed, } metaBytes, _ = marshalizer.Marshal(meta) metaHash = hasher.Compute(string(metaBytes)) @@ -1295,23 +1337,24 @@ func TestShardProcessor_IsMetaHeaderFinalShouldPass(t *testing.T) { Nonce: 1, ShardInfo: shardHdrs, Round: 1, + RandSeed: randSeed, } ordered, _ := sp.GetOrderedMetaBlocks(3) res = sp.IsMetaHeaderFinal(meta, ordered, 0) assert.True(t, res) } -//-------- requestFinalMissingHeaders -func TestShardProcessor_RequestFinalMissingHeaders(t *testing.T) { +//-------- requestMissingFinalityAttestingHeaders +func TestShardProcessor_RequestMissingFinalityAttestingHeaders(t *testing.T) { t.Parallel() - tdp := mock.NewPoolsHolderFake() + tdp := mock.NewPoolsHolderMock() arguments := CreateMockArgumentsMultiShard() arguments.DataPool = tdp sp, _ := blproc.NewShardProcessor(arguments) - sp.SetCurrHighestMetaHdrNonce(1) - res := sp.RequestFinalMissingHeaders() + sp.SetHighestHdrNonceForCurrentBlock(sharding.MetachainShardId, 1) + res := sp.RequestMissingFinalityAttestingHeaders() assert.Equal(t, res > 0, true) } @@ -1319,7 +1362,7 @@ func TestShardProcessor_RequestFinalMissingHeaders(t *testing.T) { func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldPass(t *testing.T) { t.Parallel() - tdp := mock.NewPoolsHolderFake() + tdp := mock.NewPoolsHolderMock() txHash := []byte("tx_hash1") tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) rootHash := []byte("rootHash") @@ -1349,7 +1392,8 @@ func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldPass(t *testing genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) lastHdr := genesisBlocks[0] prevHash, _ := core.CalculateHash(marshalizer, hasher, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) + randSeed := []byte("rand seed") + hdr := initBlockHeader(prevHash, randSeed, rootHash, mbHdrs) shardMiniBlock := block.ShardMiniBlockHeader{ ReceiverShardId: mbHdr.ReceiverShardID, @@ -1367,30 +1411,30 @@ func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldPass(t *testing prevMeta := 
genesisBlocks[sharding.MetachainShardId] prevHash, _ = core.CalculateHash(marshalizer, hasher, prevMeta) - meta := &block.MetaBlock{ + meta1 := &block.MetaBlock{ Nonce: 1, ShardInfo: shardHdrs, Round: 1, PrevHash: prevHash, PrevRandSeed: prevMeta.GetRandSeed(), } - metaBytes, _ := marshalizer.Marshal(meta) - metaHash := hasher.Compute(string(metaBytes)) - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash) + metaBytes, _ := marshalizer.Marshal(meta1) + metaHash1 := hasher.Compute(string(metaBytes)) + hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, metaHash1) - tdp.MetaBlocks().Put(metaHash, meta) + tdp.MetaBlocks().Put(metaHash1, meta1) - prevHash, _ = core.CalculateHash(marshalizer, hasher, meta) - meta = &block.MetaBlock{ + prevHash, _ = core.CalculateHash(marshalizer, hasher, meta1) + meta2 := &block.MetaBlock{ Nonce: 2, ShardInfo: make([]block.ShardData, 0), Round: 2, PrevHash: prevHash, } - metaBytes, _ = marshalizer.Marshal(meta) - metaHash = hasher.Compute(string(metaBytes)) + metaBytes, _ = marshalizer.Marshal(meta2) + metaHash2 := hasher.Compute(string(metaBytes)) - tdp.MetaBlocks().Put(metaHash, meta) + tdp.MetaBlocks().Put(metaHash2, meta2) arguments := CreateMockArgumentsMultiShard() arguments.DataPool = tdp arguments.Hasher = hasher @@ -1399,31 +1443,22 @@ func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldPass(t *testing sp, _ := blproc.NewShardProcessor(arguments) hdr.Round = 4 - err := sp.CheckMetaHeadersValidityAndFinality(&hdr) + sp.SetHdrForCurrentBlock(metaHash1, meta1, true) + sp.SetHdrForCurrentBlock(metaHash2, meta2, false) + + err := sp.CheckMetaHeadersValidityAndFinality() assert.Nil(t, err) } -func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldErr(t *testing.T) { +func TestShardProcessor_CheckMetaHeadersValidityAndFinalityShouldReturnNilWhenNoMetaBlocksAreUsed(t *testing.T) { t.Parallel() - mbHdrs := make([]block.MiniBlockHeader, 0) - rootHash := []byte("rootHash") - txHash := []byte("txhash1") - txHashes := make([][]byte, 0) - txHashes = append(txHashes, txHash) - - tdp := mock.NewPoolsHolderFake() + tdp := mock.NewPoolsHolderMock() genesisBlocks := createGenesisBlocks(mock.NewMultiShardsCoordinatorMock(3)) sp, _ := blproc.NewShardProcessorEmptyWith3shards(tdp, genesisBlocks) - lastHdr := genesisBlocks[0] - prevHash, _ := core.CalculateHash(&mock.MarshalizerMock{}, &mock.HasherMock{}, lastHdr) - hdr := initBlockHeader(prevHash, rootHash, mbHdrs) - - hdr.MetaBlockHashes = append(hdr.MetaBlockHashes, []byte("meta")) - hdr.Round = 0 - err := sp.CheckMetaHeadersValidityAndFinality(&hdr) - assert.Equal(t, err, process.ErrNilMetaBlockHeader) + err := sp.CheckMetaHeadersValidityAndFinality() + assert.Nil(t, err) } //------- CommitBlock @@ -1535,13 +1570,6 @@ func TestShardProcessor_CommitBlockStorageFailsForHeaderShouldErr(t *testing.T) return 0 }, } - arguments.BlocksTracker = &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - } sp, _ := blproc.NewShardProcessor(arguments) blkc, _ := blockchain.NewBlockChain( @@ -1603,14 +1631,7 @@ func TestShardProcessor_CommitBlockStorageFailsForBodyShouldWork(t *testing.T) { return nil }, GetHighestFinalBlockNonceCalled: func() uint64 { - return 0 - }, - } - arguments.BlocksTracker = &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return 
make([]data.HeaderHandler, 0) + return 0 }, } sp, err := blproc.NewShardProcessor(arguments) @@ -1746,6 +1767,9 @@ func TestShardProcessor_CommitBlockNoTxInPoolShouldErr(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) container, _ := factory.Create() @@ -1780,6 +1804,7 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { rootHash := []byte("root hash") hdrHash := []byte("header hash") + randSeed := []byte("rand seed") prevHdr := &block.Header{ Nonce: 0, @@ -1788,6 +1813,7 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { PrevHash: hdrHash, Signature: rootHash, RootHash: rootHash, + RandSeed: randSeed, } hdr := &block.Header{ @@ -1797,6 +1823,7 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { PrevHash: hdrHash, Signature: rootHash, RootHash: rootHash, + PrevRandSeed: randSeed, } mb := block.MiniBlock{ TxHashes: [][]byte{txHash}, @@ -1845,13 +1872,6 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { arguments.Hasher = hasher arguments.Accounts = accounts arguments.ForkDetector = fd - arguments.BlocksTracker = &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - } sp, _ := blproc.NewShardProcessor(arguments) blkc := createTestBlockchain() @@ -1867,7 +1887,7 @@ func TestShardProcessor_CommitBlockOkValsShouldWork(t *testing.T) { assert.Nil(t, err) assert.True(t, forkDetectorAddCalled) assert.Equal(t, hdrHash, blkc.GetCurrentBlockHeaderHash()) - //this should sleep as there is an async call to display current header and block in CommitBlock + //this should sleep as there is an async call to display current hdr and block in CommitBlock time.Sleep(time.Second) } @@ -1878,6 +1898,7 @@ func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { rootHash := []byte("root hash") hdrHash := []byte("header hash") + randSeed := []byte("rand seed") prevHdr := &block.Header{ Nonce: 0, @@ -1886,6 +1907,7 @@ func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { PrevHash: hdrHash, Signature: rootHash, RootHash: rootHash, + RandSeed: randSeed, } hdr := &block.Header{ @@ -1895,6 +1917,7 @@ func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { PrevHash: hdrHash, Signature: rootHash, RootHash: rootHash, + PrevRandSeed: randSeed, } mb := block.MiniBlock{ TxHashes: [][]byte{txHash}, @@ -1951,13 +1974,6 @@ func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { arguments.Hasher = hasher arguments.Accounts = accounts arguments.ForkDetector = fd - arguments.BlocksTracker = &mock.BlocksTrackerMock{ - AddBlockCalled: func(headerHandler data.HeaderHandler) { - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - } arguments.TxCoordinator = &mock.TransactionCoordinatorMock{ GetAllCurrentUsedTxsCalled: func(blockType block.Type) map[string]data.TransactionHandler { switch blockType { @@ -2099,7 +2115,7 @@ func TestNode_ComputeNewNoncePrevHashShouldWork(t *testing.T) { } hasher.ComputeCalled = func(s string) []byte { if s == "hdrHeaderMarshalized" { - return []byte("hdr hash") + return []byte("header hash") } if s == "txBlockBodyMarshalized" { return []byte("tx block body hash") @@ -2301,6 +2317,9 @@ func 
TestShardProcessor_MarshalizedDataToBroadcastShouldWork(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) container, _ := factory.Create() @@ -2401,6 +2420,9 @@ func TestShardProcessor_MarshalizedDataMarshalWithoutSuccess(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) container, _ := factory.Create() @@ -2434,7 +2456,7 @@ func TestShardProcessor_ReceivedMetaBlockShouldRequestMissingMiniBlocks(t *testi hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() //we will have a metablock that will return 3 miniblock hashes //1 miniblock hash will be in cache @@ -2444,19 +2466,22 @@ func TestShardProcessor_ReceivedMetaBlockShouldRequestMissingMiniBlocks(t *testi miniBlockHash2 := []byte("miniblock hash 2") miniBlockHash3 := []byte("miniblock hash 3") - metaBlock := mock.HeaderHandlerStub{ - GetMiniBlockHeadersWithDstCalled: func(destId uint32) map[string]uint32 { - return map[string]uint32{ - string(miniBlockHash1): 0, - string(miniBlockHash2): 0, - string(miniBlockHash3): 0, - } - }, - } + metaBlock := &block.MetaBlock{ + Nonce: 1, + Round: 1, + ShardInfo: []block.ShardData{ + { + ShardId: 1, + ShardMiniBlockHeaders: []block.ShardMiniBlockHeader{ + {Hash: miniBlockHash1, SenderShardId: 1, ReceiverShardId: 0}, + {Hash: miniBlockHash2, SenderShardId: 1, ReceiverShardId: 0}, + {Hash: miniBlockHash3, SenderShardId: 1, ReceiverShardId: 0}, + }}, + }} //put this metaBlock inside datapool metaBlockHash := []byte("metablock hash") - dataPool.MetaBlocks().Put(metaBlockHash, &metaBlock) + dataPool.MetaBlocks().Put(metaBlockHash, metaBlock) //put the existing miniblock inside datapool dataPool.MiniBlocks().Put(miniBlockHash1, &block.MiniBlock{}) @@ -2509,7 +2534,7 @@ func TestShardProcessor_ReceivedMetaBlockNoMissingMiniBlocksShouldPass(t *testin hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() //we will have a metablock that will return 3 miniblock hashes //1 miniblock hash will be in cache @@ -2517,13 +2542,16 @@ func TestShardProcessor_ReceivedMetaBlockNoMissingMiniBlocksShouldPass(t *testin miniBlockHash1 := []byte("miniblock hash 1 found in cache") - metaBlock := mock.HeaderHandlerStub{ - GetMiniBlockHeadersWithDstCalled: func(destId uint32) map[string]uint32 { - return map[string]uint32{ - string(miniBlockHash1): 0, - } - }, - } + metaBlock := &block.MetaBlock{ + Nonce: 1, + Round: 1, + ShardInfo: []block.ShardData{ + block.ShardData{ + ShardId: 1, + ShardMiniBlockHeaders: []block.ShardMiniBlockHeader{ + block.ShardMiniBlockHeader{Hash: miniBlockHash1, SenderShardId: 1, ReceiverShardId: 0}, + }}, + }} //put this metaBlock inside datapool metaBlockHash := []byte("metablock hash") @@ -2562,7 +2590,7 @@ func TestShardProcessor_ReceivedMetaBlockNoMissingMiniBlocksShouldPass(t *testin func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMe(t *testing.T) { t.Parallel() - tdp := mock.NewPoolsHolderFake() + tdp := mock.NewPoolsHolderMock() txHash := []byte("tx_hash1") tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) body := 
make(block.Body, 0) @@ -2620,17 +2648,17 @@ func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMe(t *testing.T) { arguments := CreateMockArgumentsMultiShard() arguments.DataPool = tdp sp, _ := blproc.NewShardProcessor(arguments) - miniBlockSlice, usedMetaHdrsHashes, noOfTxs, err := sp.CreateAndProcessCrossMiniBlocksDstMe(3, 2, 2, haveTimeTrue) + miniBlockSlice, usedMetaHdrsHashes, noOfTxs, err := sp.CreateAndProcessCrossMiniBlocksDstMe(2, 2, haveTimeTrue) assert.Equal(t, err == nil, true) assert.Equal(t, len(miniBlockSlice) == 0, true) - assert.Equal(t, len(usedMetaHdrsHashes) == 0, true) + assert.Equal(t, usedMetaHdrsHashes, uint32(0)) assert.Equal(t, noOfTxs, uint32(0)) } func TestShardProcessor_NewShardProcessorWrongTypeOfStartHeaderShouldErrWrongTypeAssertion(t *testing.T) { t.Parallel() - tdp := mock.NewPoolsHolderFake() + tdp := mock.NewPoolsHolderMock() txHash := []byte(nil) tdp.Transactions().AddData(txHash, &transaction.Transaction{}, process.ShardCacherIdentifier(1, 0)) @@ -2654,7 +2682,7 @@ func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMeProcessPartOfMiniBlo haveTimeTrue := func() bool { return true } - tdp := mock.NewPoolsHolderFake() + tdp := mock.NewPoolsHolderMock() destShardId := uint32(2) hasher := &mock.HasherStub{} @@ -2727,10 +2755,10 @@ func TestShardProcessor_CreateAndProcessCrossMiniBlocksDstMeProcessPartOfMiniBlo arguments.DataPool = tdp sp, _ := blproc.NewShardProcessor(arguments) - miniBlocksReturned, usedMetaHdrsHashes, nrTxAdded, err := sp.CreateAndProcessCrossMiniBlocksDstMe(3, 2, 2, haveTimeTrue) + miniBlocksReturned, usedMetaHdrsHashes, nrTxAdded, err := sp.CreateAndProcessCrossMiniBlocksDstMe(2, 2, haveTimeTrue) assert.Equal(t, 0, len(miniBlocksReturned)) - assert.Equal(t, 0, len(usedMetaHdrsHashes)) + assert.Equal(t, uint32(0), usedMetaHdrsHashes) assert.Equal(t, uint32(0), nrTxAdded) assert.Nil(t, err) } @@ -2742,7 +2770,7 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() //we will have a 3 txs in pool @@ -2815,6 +2843,13 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T txProcessorMock, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{ + MinGasLimitCalled: func() uint64 { + return 0 + }, + }, ) container, _ := factory.Create() @@ -2835,7 +2870,7 @@ func TestShardProcessor_CreateMiniBlocksShouldWorkWithIntraShardTxs(t *testing.T arguments.TxCoordinator = tc bp, _ := blproc.NewShardProcessor(arguments) - blockBody, err := bp.CreateMiniBlocks(1, 15000, 0, func() bool { return true }) + blockBody, err := bp.CreateMiniBlocks(15000, 0, func() bool { return true }) assert.Nil(t, err) //testing execution @@ -2863,7 +2898,7 @@ func TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() miniblockHashes := make([][]byte, 6) @@ -2874,20 +2909,23 @@ func TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { } //put 3 metablocks in pool - mb1Hash := []byte("meta block 1") + metaBlockHash1 := []byte("meta block 1") + metaBlock1 := createDummyMetaBlock(destShardId, destShards[0], miniblockHashes[0], miniblockHashes[1]) 
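// The helpers added at the end of shardblock.go (addProcessedMiniBlock, isMiniBlockProcessed,
// removeProcessedMiniBlock, ...) keep a per-metablock set of already executed mini block
// hashes, which the tests below exercise through SetHdrForCurrentBlock / IsMiniBlockProcessed.
// A minimal sketch of that data structure, assuming nothing beyond what the diff shows:
package main

import (
	"fmt"
	"sync"
)

type processedMiniBlocksTracker struct {
	mut                 sync.RWMutex
	processedMiniBlocks map[string]map[string]struct{} // metaBlockHash -> set of miniBlockHashes
}

func newProcessedMiniBlocksTracker() *processedMiniBlocksTracker {
	return &processedMiniBlocksTracker{
		processedMiniBlocks: make(map[string]map[string]struct{}),
	}
}

// add marks a mini block as processed under the meta block that declared it.
func (t *processedMiniBlocksTracker) add(metaBlockHash, miniBlockHash []byte) {
	t.mut.Lock()
	defer t.mut.Unlock()

	set, ok := t.processedMiniBlocks[string(metaBlockHash)]
	if !ok {
		set = make(map[string]struct{})
		t.processedMiniBlocks[string(metaBlockHash)] = set
	}
	set[string(miniBlockHash)] = struct{}{}
}

// isProcessed reports whether a mini block was already executed for the given meta block.
func (t *processedMiniBlocksTracker) isProcessed(metaBlockHash, miniBlockHash []byte) bool {
	t.mut.RLock()
	defer t.mut.RUnlock()

	set, ok := t.processedMiniBlocks[string(metaBlockHash)]
	if !ok {
		return false
	}
	_, found := set[string(miniBlockHash)]
	return found
}

func main() {
	tracker := newProcessedMiniBlocksTracker()
	tracker.add([]byte("meta block 2"), []byte("mini block 2"))

	fmt.Println(tracker.isProcessed([]byte("meta block 2"), []byte("mini block 2"))) // true
	fmt.Println(tracker.isProcessed([]byte("meta block 2"), []byte("mini block 3"))) // false
}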
dataPool.MetaBlocks().Put( - mb1Hash, - createDummyMetaBlock(destShardId, destShards[0], miniblockHashes[0], miniblockHashes[1]), + metaBlockHash1, + metaBlock1, ) - mb2Hash := []byte("meta block 2") + metaBlockHash2 := []byte("meta block 2") + metaBlock2 := createDummyMetaBlock(destShardId, destShards[1], miniblockHashes[2], miniblockHashes[3]) dataPool.MetaBlocks().Put( - mb2Hash, - createDummyMetaBlock(destShardId, destShards[1], miniblockHashes[2], miniblockHashes[3]), + metaBlockHash2, + metaBlock2, ) - mb3Hash := []byte("meta block 3") + metaBlockHash3 := []byte("meta block 3") + metaBlock3 := createDummyMetaBlock(destShardId, destShards[2], miniblockHashes[4], miniblockHashes[5]) dataPool.MetaBlocks().Put( - mb3Hash, - createDummyMetaBlock(destShardId, destShards[2], miniblockHashes[4], miniblockHashes[5]), + metaBlockHash3, + metaBlock3, ) shardCoordinator := mock.NewMultipleShardsCoordinatorMock() @@ -2904,14 +2942,13 @@ func TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { return 0 }, } - arguments.BlocksTracker = &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - } arguments.StartHeaders = createGenesisBlocks(shardCoordinator) bp, _ := blproc.NewShardProcessor(arguments) + bp.SetHdrForCurrentBlock(metaBlockHash1, metaBlock1, true) + bp.SetHdrForCurrentBlock(metaBlockHash2, metaBlock2, true) + bp.SetHdrForCurrentBlock(metaBlockHash3, metaBlock3, true) + //create mini block headers with first 3 miniblocks from miniblocks var mbHeaders := []block.MiniBlockHeader{ {Hash: miniblockHashes[0]}, @@ -2920,24 +2957,22 @@ func TestShardProcessor_GetProcessedMetaBlockFromPoolShouldWork(t *testing.T) { } hashes := [][]byte{ - mb1Hash, - mb2Hash, - mb3Hash, + metaBlockHash1, + metaBlockHash2, + metaBlockHash3, } blockHeader := &block.Header{MetaBlockHashes: hashes, MiniBlockHeaders: mbHeaders} - _, err := bp.GetProcessedMetaBlocksFromHeader(blockHeader) + err := bp.AddProcessedCrossMiniBlocksFromHeader(blockHeader) assert.Nil(t, err) //check WasMiniBlockProcessed for remaining metablocks - metaBlock2Recov, _ := dataPool.MetaBlocks().Get(mb2Hash) - assert.True(t, (metaBlock2Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[2])) - assert.False(t, (metaBlock2Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[3])) + assert.True(t, bp.IsMiniBlockProcessed(metaBlockHash2, miniblockHashes[2])) + assert.False(t, bp.IsMiniBlockProcessed(metaBlockHash2, miniblockHashes[3])) - metaBlock3Recov, _ := dataPool.MetaBlocks().Get(mb3Hash) - assert.False(t, (metaBlock3Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[4])) - assert.False(t, (metaBlock3Recov.(data.HeaderHandler)).GetMiniBlockProcessed(miniblockHashes[5])) + assert.False(t, bp.IsMiniBlockProcessed(metaBlockHash3, miniblockHashes[4])) + assert.False(t, bp.IsMiniBlockProcessed(metaBlockHash3, miniblockHashes[5])) } func TestBlockProcessor_RestoreBlockIntoPoolsShouldErrNilBlockHeader(t *testing.T) { @@ -2970,7 +3005,7 @@ func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { txHash := []byte("tx hash 1") - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() marshalizerMock := &mock.MarshalizerMock{} hasherMock := &mock.HasherStub{} @@ -3005,6 +3040,9 @@ func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + 
&mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) container, _ := factory.Create() @@ -3041,22 +3079,32 @@ func TestShardProcessor_RestoreBlockIntoPoolsShouldWork(t *testing.T) { metablockHash := []byte("meta block hash 1") metablockHeader := createDummyMetaBlock(0, 1, miniblockHash) - metablockHeader.SetMiniBlockProcessed(metablockHash, true) dataPool.MetaBlocks().Put( metablockHash, metablockHeader, ) - err = sp.RestoreBlockIntoPools(&block.Header{}, body) + store.GetCalled = func(unitType dataRetriever.UnitType, key []byte) ([]byte, error) { + return marshalizerMock.Marshal(metablockHeader) + } + + miniBlockHeader := block.MiniBlockHeader{ + Hash: miniblockHash, + SenderShardID: miniblock.SenderShardID, + ReceiverShardID: miniblock.ReceiverShardID, + } + + metaBlockHashes := make([][]byte, 0) + metaBlockHashes = append(metaBlockHashes, metablockHash) + + err = sp.RestoreBlockIntoPools(&block.Header{MetaBlockHashes: [][]byte{metablockHash}, MiniBlockHeaders: []block.MiniBlockHeader{miniBlockHeader}}, body) miniblockFromPool, _ := dataPool.MiniBlocks().Get(miniblockHash) txFromPool, _ := dataPool.Transactions().SearchFirstData(txHash) - metablockFromPool, _ := dataPool.MetaBlocks().Get(metablockHash) - metablock := metablockFromPool.(*block.MetaBlock) assert.Nil(t, err) assert.Equal(t, &miniblock, miniblockFromPool) assert.Equal(t, &tx, txFromPool) - assert.Equal(t, false, metablock.GetMiniBlockProcessed(miniblockHash)) + assert.Equal(t, false, sp.IsMiniBlockProcessed(metablockHash, miniblockHash)) } func TestShardProcessor_DecodeBlockBody(t *testing.T) { @@ -3121,14 +3169,6 @@ func TestShardProcessor_IsHdrConstructionValid(t *testing.T) { arguments.Hasher = hasher arguments.Marshalizer = marshalizer arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(shardNr) - arguments.BlocksTracker = &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - } arguments.StartHeaders = createGenesisBlocks(arguments.ShardCoordinator) sp, _ := blproc.NewShardProcessor(arguments) @@ -3174,7 +3214,7 @@ func TestShardProcessor_IsHdrConstructionValid(t *testing.T) { currHdr.Nonce = 0 prevHdr.Nonce = 0 err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrRootStateMissmatch) + assert.Equal(t, err, process.ErrRootStateDoesNotMatch) currHdr.Nonce = 0 prevHdr.Nonce = 0 @@ -3186,7 +3226,7 @@ func TestShardProcessor_IsHdrConstructionValid(t *testing.T) { prevHdr.Nonce = 45 prevHdr.Round = currHdr.Round + 1 err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrLowerRoundInOtherChainBlock) + assert.Equal(t, err, process.ErrLowerRoundInBlock) prevHdr.Round = currHdr.Round - 1 currHdr.Nonce = prevHdr.Nonce + 2 @@ -3194,16 +3234,17 @@ func TestShardProcessor_IsHdrConstructionValid(t *testing.T) { assert.Equal(t, err, process.ErrWrongNonceInBlock) currHdr.Nonce = prevHdr.Nonce + 1 - prevHdr.RandSeed = []byte("randomwrong") + currHdr.PrevHash = []byte("wronghash") err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, process.ErrRandSeedMismatch) + assert.Equal(t, err, process.ErrBlockHashDoesNotMatch) - prevHdr.RandSeed = currRandSeed - currHdr.PrevHash = []byte("wronghash") + prevHdr.RandSeed = []byte("randomwrong") + currHdr.PrevHash, _ = sp.ComputeHeaderHash(prevHdr) err = sp.IsHdrConstructionValid(currHdr, prevHdr) - assert.Equal(t, err, 
process.ErrHashDoesNotMatchInOtherChainBlock) + assert.Equal(t, err, process.ErrRandSeedDoesNotMatch) currHdr.PrevHash = prevHash + prevHdr.RandSeed = currRandSeed prevHdr.RootHash = []byte("prevRootHash") err = sp.IsHdrConstructionValid(currHdr, prevHdr) assert.Nil(t, err) @@ -3214,7 +3255,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() forkDetector := &mock.ForkDetectorMock{} highNonce := uint64(500) forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { @@ -3237,14 +3278,6 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { arguments.Marshalizer = marshalizer arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(shardNr) arguments.ForkDetector = forkDetector - arguments.BlocksTracker = &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - } arguments.StartHeaders = createGenesisBlocks(arguments.ShardCoordinator) sp, _ := blproc.NewShardProcessor(arguments) @@ -3258,8 +3291,6 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { RandSeed: prevRandSeed} notarizedHdrs[sharding.MetachainShardId] = append(notarizedHdrs[sharding.MetachainShardId], lastHdr) - //put the existing headers inside datapool - //header shard 0 prevHash, _ := sp.ComputeHeaderHash(sp.LastNotarizedHdrForShard(sharding.MetachainShardId).(*block.MetaBlock)) prevHdr := &block.MetaBlock{ @@ -3286,13 +3317,13 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { blockHeader := &block.Header{} // test header not in pool and defer called - processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromHeader(blockHeader) + processedMetaHdrs, err := sp.GetOrderedProcessedMetaBlocksFromHeader(blockHeader) assert.Nil(t, err) err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) assert.Nil(t, err) - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + err = sp.RemoveProcessedMetaBlocksFromPool(processedMetaHdrs) assert.Nil(t, err) assert.Equal(t, 0, putCalledNr) @@ -3302,18 +3333,19 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { // wrong header type in pool and defer called dataPool.MetaBlocks().Put(currHash, shardHdr) + sp.SetHdrForCurrentBlock(currHash, shardHdr, true) hashes := make([][]byte, 0) hashes = append(hashes, currHash) blockHeader = &block.Header{MetaBlockHashes: hashes, MiniBlockHeaders: mbHeaders} - processedMetaHdrs, err = sp.GetProcessedMetaBlocksFromHeader(blockHeader) + processedMetaHdrs, err = sp.GetOrderedProcessedMetaBlocksFromHeader(blockHeader) assert.Equal(t, process.ErrWrongTypeAssertion, err) err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) assert.Nil(t, err) - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + err = sp.RemoveProcessedMetaBlocksFromPool(processedMetaHdrs) assert.Nil(t, err) assert.Equal(t, 0, putCalledNr) @@ -3324,18 +3356,22 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNoDstMB(t *testing.T) { dataPool.MetaBlocks().Put(currHash, currHdr) dataPool.MetaBlocks().Put(prevHash, prevHdr) + sp.CreateBlockStarted() + sp.SetHdrForCurrentBlock(currHash, currHdr, true) + sp.SetHdrForCurrentBlock(prevHash, prevHdr, true) + 
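// The IsHdrConstructionValid assertions above exercise, in order: round monotonicity
// (ErrLowerRoundInBlock), nonce continuity (ErrWrongNonceInBlock), that PrevHash equals the
// hash of the previous header (ErrBlockHashDoesNotMatch), and that PrevRandSeed equals the
// previous header's RandSeed (ErrRandSeedDoesNotMatch). The real check lives in the base
// block processor; the sketch below is only a placeholder reconstruction of that order,
// with hashing reduced to a stand-in so it stays self-contained.
package main

import (
	"bytes"
	"crypto/sha256"
	"errors"
	"fmt"
)

type header struct {
	Nonce        uint64
	Round        uint64
	PrevHash     []byte
	PrevRandSeed []byte
	RandSeed     []byte
}

var (
	errLowerRoundInBlock     = errors.New("lower round in block")
	errWrongNonceInBlock     = errors.New("wrong nonce in block")
	errBlockHashDoesNotMatch = errors.New("block hash does not match")
	errRandSeedDoesNotMatch  = errors.New("rand seed does not match")
)

// hashHeader stands in for marshalizer+hasher; any stable encoding works for the sketch.
func hashHeader(h header) []byte {
	sum := sha256.Sum256([]byte(fmt.Sprintf("%d|%d|%x|%x", h.Nonce, h.Round, h.PrevHash, h.RandSeed)))
	return sum[:]
}

func checkConstruction(curr, prev header) error {
	if curr.Round <= prev.Round {
		return errLowerRoundInBlock
	}
	if curr.Nonce != prev.Nonce+1 {
		return errWrongNonceInBlock
	}
	if !bytes.Equal(curr.PrevHash, hashHeader(prev)) {
		return errBlockHashDoesNotMatch
	}
	if !bytes.Equal(curr.PrevRandSeed, prev.RandSeed) {
		return errRandSeedDoesNotMatch
	}
	return nil
}

func main() {
	prev := header{Nonce: 45, Round: 45, RandSeed: []byte("prev rand seed")}
	curr := header{
		Nonce:        46,
		Round:        46,
		PrevHash:     hashHeader(prev),
		PrevRandSeed: prev.RandSeed,
		RandSeed:     []byte("curr rand seed"),
	}

	fmt.Println(checkConstruction(curr, prev)) // <nil>

	curr.PrevRandSeed = []byte("wrong seed")
	fmt.Println(checkConstruction(curr, prev)) // rand seed does not match
}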
hashes = make([][]byte, 0) hashes = append(hashes, currHash) hashes = append(hashes, prevHash) blockHeader = &block.Header{MetaBlockHashes: hashes, MiniBlockHeaders: mbHeaders} - processedMetaHdrs, err = sp.GetProcessedMetaBlocksFromHeader(blockHeader) + processedMetaHdrs, err = sp.GetOrderedProcessedMetaBlocksFromHeader(blockHeader) assert.Nil(t, err) err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) assert.Nil(t, err) - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + err = sp.RemoveProcessedMetaBlocksFromPool(processedMetaHdrs) assert.Nil(t, err) assert.Equal(t, 4, putCalledNr) @@ -3371,7 +3407,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNotAllMBFinished(t *tes hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() forkDetector := &mock.ForkDetectorMock{} highNonce := uint64(500) forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { @@ -3395,14 +3431,6 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNotAllMBFinished(t *tes arguments.Marshalizer = marshalizer arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(shardNr) arguments.ForkDetector = forkDetector - arguments.BlocksTracker = &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - } arguments.StartHeaders = createGenesisBlocks(arguments.ShardCoordinator) sp, _ := blproc.NewShardProcessor(arguments) @@ -3484,18 +3512,21 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrNotAllMBFinished(t *tes dataPool.MetaBlocks().Put(currHash, currHdr) dataPool.MetaBlocks().Put(prevHash, prevHdr) + sp.SetHdrForCurrentBlock(currHash, currHdr, true) + sp.SetHdrForCurrentBlock(prevHash, prevHdr, true) + hashes := make([][]byte, 0) hashes = append(hashes, currHash) hashes = append(hashes, prevHash) blockHeader := &block.Header{MetaBlockHashes: hashes, MiniBlockHeaders: mbHeaders} - processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromHeader(blockHeader) + processedMetaHdrs, err := sp.GetOrderedProcessedMetaBlocksFromHeader(blockHeader) assert.Nil(t, err) err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) assert.Nil(t, err) - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + err = sp.RemoveProcessedMetaBlocksFromPool(processedMetaHdrs) assert.Nil(t, err) assert.Equal(t, 2, putCalledNr) @@ -3507,7 +3538,7 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrAllMBFinished(t *testin hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() forkDetector := &mock.ForkDetectorMock{} highNonce := uint64(500) forkDetector.GetHighestFinalBlockNonceCalled = func() uint64 { @@ -3530,14 +3561,6 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrAllMBFinished(t *testin arguments.Marshalizer = marshalizer arguments.ShardCoordinator = mock.NewMultiShardsCoordinatorMock(shardNr) arguments.ForkDetector = forkDetector - arguments.BlocksTracker = &mock.BlocksTrackerMock{ - RemoveNotarisedBlocksCalled: func(headerHandler data.HeaderHandler) error { - return nil - }, - UnnotarisedBlocksCalled: func() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) - }, - } arguments.StartHeaders = createGenesisBlocks(arguments.ShardCoordinator) sp, _ := 
blproc.NewShardProcessor(arguments) @@ -3629,19 +3652,22 @@ func TestShardProcessor_RemoveAndSaveLastNotarizedMetaHdrAllMBFinished(t *testin PrevHash: currHash, Nonce: 47}) + sp.SetHdrForCurrentBlock(currHash, currHdr, true) + sp.SetHdrForCurrentBlock(prevHash, prevHdr, true) + hashes := make([][]byte, 0) hashes = append(hashes, currHash) hashes = append(hashes, prevHash) blockHeader := &block.Header{MetaBlockHashes: hashes, MiniBlockHeaders: mbHeaders} - processedMetaHdrs, err := sp.GetProcessedMetaBlocksFromHeader(blockHeader) + processedMetaHdrs, err := sp.GetOrderedProcessedMetaBlocksFromHeader(blockHeader) assert.Nil(t, err) assert.Equal(t, 2, len(processedMetaHdrs)) err = sp.SaveLastNotarizedHeader(sharding.MetachainShardId, processedMetaHdrs) assert.Nil(t, err) - err = sp.RemoveProcessedMetablocksFromPool(processedMetaHdrs) + err = sp.RemoveProcessedMetaBlocksFromPool(processedMetaHdrs) assert.Nil(t, err) assert.Equal(t, 4, putCalledNr) @@ -3752,7 +3778,7 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolShouldPass(t *testing.T) { marshalizer := &mock.MarshalizerMock{} - poolFake := mock.NewPoolsHolderFake() + poolFake := mock.NewPoolsHolderMock() metaBlock := block.MetaBlock{ Nonce: 1, @@ -3836,54 +3862,26 @@ func TestShardPreprocessor_getAllMiniBlockDstMeFromMetaShouldPass(t *testing.T) } shardHdrs := make([]block.ShardData, 0) shardHdrs = append(shardHdrs, shardHeader) + metaBlock := &block.MetaBlock{Nonce: 1, Round: 1, ShardInfo: shardHdrs} idp := initDataPool([]byte("tx_hash1")) - idp.MetaBlocksCalled = func() storage.Cacher { - return &mock.CacherStub{ - GetCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, []byte("tx1_hash")) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return nil - }, - LenCalled: func() int { - return 0 - }, - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return &block.MetaBlock{ - Nonce: 1, - Round: 1, - ShardInfo: shardHdrs, - }, true - }, - PutCalled: func(key []byte, value interface{}) (evicted bool) { - return true - }, - RegisterHandlerCalled: func(i func(key []byte)) {}, - } - } arguments := CreateMockArgumentsMultiShard() arguments.DataPool = idp sp, _ := blproc.NewShardProcessor(arguments) - meta := block.MetaBlock{ - Nonce: 0, - ShardInfo: make([]block.ShardData, 0), - } - - metaBytes, _ := marshalizer.Marshal(meta) + metaBytes, _ := marshalizer.Marshal(metaBlock) hasher.ComputeCalled = func(s string) []byte { return []byte("cool") } metaHash := hasher.Compute(string(metaBytes)) + sp.SetHdrForCurrentBlock(metaHash, metaBlock, true) + metablockHashes := make([][]byte, 0) metablockHashes = append(metablockHashes, metaHash) + header := &block.Header{Nonce: 1, Round: 1, MetaBlockHashes: metablockHashes} - orderedMetaBlocks, err := sp.GetAllMiniBlockDstMeFromMeta(1, metablockHashes) + orderedMetaBlocks, err := sp.GetAllMiniBlockDstMeFromMeta(header) assert.Equal(t, 1, len(orderedMetaBlocks)) assert.Equal(t, orderedMetaBlocks[""], metaHash) @@ -3895,7 +3893,7 @@ func TestShardProcessor_GetHighestHdrForOwnShardFromMetachainNothingToProcess(t arguments := CreateMockArgumentsMultiShard() sp, _ := blproc.NewShardProcessor(arguments) - hdrs,_,_ := sp.GetHighestHdrForOwnShardFromMetachain(nil) + hdrs, _, _ := sp.GetHighestHdrForOwnShardFromMetachain(nil) assert.NotNil(t, hdrs) assert.Equal(t, uint64(0), hdrs[0].GetNonce()) @@ -4110,13 +4108,13 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { t.Parallel() 
marshalizer := &mock.MarshalizerMock{} - poolFake := mock.NewPoolsHolderFake() + poolMock := mock.NewPoolsHolderMock() storer := &mock.ChainStorerMock{} shardC := mock.NewMultiShardsCoordinatorMock(3) arguments := CreateMockArgumentsMultiShard() - arguments.DataPool = poolFake + arguments.DataPool = poolMock arguments.Store = storer arguments.ShardCoordinator = shardC arguments.StartHeaders = createGenesisBlocks(shardC) @@ -4142,7 +4140,7 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { Nonce: 1, ShardInfo: shardInfos, } - meta.SetMiniBlockProcessed(testMBHash, true) + hasher := &mock.HasherStub{} metaBytes, _ := marshalizer.Marshal(meta) @@ -4150,10 +4148,11 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { return []byte("cool") } metaHash := hasher.Compute(string(metaBytes)) + sp.AddProcessedMiniBlock(metaHash, testMBHash) metablockHashes := make([][]byte, 0) metablockHashes = append(metablockHashes, metaHash) - metaBlockRestored, ok := poolFake.MetaBlocks().Get(metaHash) + metaBlockRestored, ok := poolMock.MetaBlocks().Get(metaHash) assert.Equal(t, nil, metaBlockRestored) assert.False(t, ok) @@ -4171,9 +4170,9 @@ func TestShardProcessor_RestoreMetaBlockIntoPoolVerifyMiniblocks(t *testing.T) { err := sp.RestoreMetaBlockIntoPool(miniblockHashes, metablockHashes) - metaBlockRestored, _ = poolFake.MetaBlocks().Get(metaHash) + metaBlockRestored, _ = poolMock.MetaBlocks().Get(metaHash) assert.Equal(t, meta, metaBlockRestored) assert.Nil(t, err) - assert.True(t, meta.GetMiniBlockProcessed(testMBHash)) + assert.True(t, sp.IsMiniBlockProcessed(metaHash, testMBHash)) } diff --git a/process/common.go b/process/common.go index 5cca6b46cd1..840667a59df 100644 --- a/process/common.go +++ b/process/common.go @@ -1,6 +1,8 @@ package process import ( + "sort" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/transaction" @@ -552,3 +554,12 @@ func getHeaderHashFromStorageWithNonce( return hash, nil } + +// SortHeadersByNonce will sort a given list of headers by nonce +func SortHeadersByNonce(headers []data.HeaderHandler) { + if len(headers) > 1 { + sort.Slice(headers, func(i, j int) bool { + return headers[i].GetNonce() < headers[j].GetNonce() + }) + } +} diff --git a/process/common_test.go b/process/common_test.go index 7ecdb90bae2..42bad1ed51f 100644 --- a/process/common_test.go +++ b/process/common_test.go @@ -3,8 +3,11 @@ package process_test import ( "bytes" "sync" + "sync/atomic" "testing" + "time" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -34,30 +37,42 @@ func TestEmptyChannelShouldWorkOnBufferedChannel(t *testing.T) { assert.Equal(t, 3, readsCnt) } -func TestEmptyChannelShouldWorkOnNotBufferdChannel(t *testing.T) { +func TestEmptyChannelShouldWorkOnNotBufferedChannel(t *testing.T) { ch := make(chan bool) assert.Equal(t, 0, len(ch)) - readsCnt := process.EmptyChannel(ch) + readsCnt := int32(process.EmptyChannel(ch)) assert.Equal(t, 0, len(ch)) - assert.Equal(t, 0, readsCnt) + assert.Equal(t, int32(0), readsCnt) wg := sync.WaitGroup{} + wgChanWasWritten := sync.WaitGroup{} numConcurrentWrites := 100 wg.Add(numConcurrentWrites) + wgChanWasWritten.Add(numConcurrentWrites) for i := 0; i < numConcurrentWrites; i++ { go func() { wg.Done() + time.Sleep(time.Millisecond) ch <- true + 
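// Minimal usage sketch for the SortHeadersByNonce helper added to process/common.go
// above; it mirrors the accompanying TestSortHeadersByNonceShouldWork and only assumes
// the data, block and process packages already used throughout this PR.
package main

import (
	"fmt"

	"github.com/ElrondNetwork/elrond-go/data"
	"github.com/ElrondNetwork/elrond-go/data/block"
	"github.com/ElrondNetwork/elrond-go/process"
)

func main() {
	headers := []data.HeaderHandler{
		&block.Header{Nonce: 3},
		&block.Header{Nonce: 1},
		&block.Header{Nonce: 2},
	}

	// Sorts in place, ascending by nonce; nil or single-element slices are left untouched.
	process.SortHeadersByNonce(headers)

	for _, hdr := range headers {
		fmt.Println(hdr.GetNonce()) // prints 1, 2, 3
	}
}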
wgChanWasWritten.Done() }() } // wait for go routines to start wg.Wait() - readsCnt = process.EmptyChannel(ch) + go func() { + for readsCnt < int32(numConcurrentWrites) { + atomic.AddInt32(&readsCnt, int32(process.EmptyChannel(ch))) + } + }() + + // wait for go routines to finish + wgChanWasWritten.Wait() + assert.Equal(t, 0, len(ch)) - assert.Equal(t, numConcurrentWrites, readsCnt) + assert.Equal(t, int32(numConcurrentWrites), atomic.LoadInt32(&readsCnt)) } func TestGetShardHeaderShouldErrNilCacher(t *testing.T) { @@ -2096,3 +2111,21 @@ func TestGetTransactionHandlerFromStorageShouldWork(t *testing.T) { assert.Nil(t, err) assert.Equal(t, txFromPool, tx) } + +func TestSortHeadersByNonceShouldWork(t *testing.T) { + headers := []data.HeaderHandler{ + &block.Header{Nonce: 3}, + &block.Header{Nonce: 2}, + &block.Header{Nonce: 1}, + } + + assert.Equal(t, uint64(3), headers[0].GetNonce()) + assert.Equal(t, uint64(2), headers[1].GetNonce()) + assert.Equal(t, uint64(1), headers[2].GetNonce()) + + process.SortHeadersByNonce(headers) + + assert.Equal(t, uint64(1), headers[0].GetNonce()) + assert.Equal(t, uint64(2), headers[1].GetNonce()) + assert.Equal(t, uint64(3), headers[2].GetNonce()) +} diff --git a/process/constants.go b/process/constants.go index 2a5be77344e..9d87a1ed5d6 100644 --- a/process/constants.go +++ b/process/constants.go @@ -24,6 +24,8 @@ const ( SCDeployment // SCInvoking defines ID of a transaction of type smart contract call SCInvoking + // RewardTx defines ID of a reward transaction + RewardTx // InvalidTransaction defines unknown transaction type InvalidTransaction ) @@ -32,7 +34,8 @@ const ShardBlockFinality = 1 const MetaBlockFinality = 1 const MaxHeaderRequestsAllowed = 10 const MaxItemsInBlock = 15000 -const MinItemsInBlock = 1000 +const MinItemsInBlock = 15000 +const MaxNoncesDifference = 5 // TODO - calculate exactly in case of the VM, for every VM to have a similar constant, operations / seconds const MaxGasLimitPerMiniBlock = uint64(100000) diff --git a/process/coordinator/process.go b/process/coordinator/process.go index a13526cade9..9002696b0d2 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -74,6 +74,7 @@ func NewTransactionCoordinator( if tc.miniBlockPool == nil || tc.miniBlockPool.IsInterfaceNil() { return nil, process.ErrNilMiniBlockPool } + tc.miniBlockPool.RegisterHandler(tc.receivedMiniBlock) tc.onRequestMiniBlock = requestHandler.RequestMiniBlock @@ -206,8 +207,7 @@ func (tc *transactionCoordinator) SaveBlockDataToStorage(body block.Body) error errMutex := sync.Mutex{} wg := sync.WaitGroup{} - // Length of body types + another go routine for the intermediate transactions - wg.Add(len(separatedBodies)) + wg.Add(len(separatedBodies) + len(tc.keysInterimProcs)) for key, value := range separatedBodies { go func(blockType block.Type, blockBody block.Body) { @@ -230,48 +230,52 @@ func (tc *transactionCoordinator) SaveBlockDataToStorage(body block.Body) error }(key, value) } - wg.Wait() + for _, blockType := range tc.keysInterimProcs { + go func(blockType block.Type) { + intermediateProc := tc.getInterimProcessor(blockType) + if intermediateProc == nil { + wg.Done() + return + } - intermediatePreproc := tc.getInterimProcessor(block.SmartContractResultBlock) - if intermediatePreproc == nil { - return errFound - } + err := intermediateProc.SaveCurrentIntermediateTxToStorage() + if err != nil { + log.Debug(err.Error()) - err := intermediatePreproc.SaveCurrentIntermediateTxToStorage() - if err != nil { - log.Debug(err.Error()) + 
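// Condensed sketch (not the full implementation) of the fan-out now used in
// SaveBlockDataToStorage: one WaitGroup is sized up front for both the per-body-type
// preprocessors and the interim processors, so every goroutine has to call Done exactly
// once, including the "processor not found" early-return branches, otherwise Wait would
// block forever. separatedBodies, keysInterimProcs, getPreProcessor and
// getInterimProcessor stand in for the coordinator's fields and methods.
wg := sync.WaitGroup{}
wg.Add(len(separatedBodies) + len(keysInterimProcs))

for key, value := range separatedBodies {
	go func(blockType block.Type, blockBody block.Body) {
		defer wg.Done() // also covers the early return when no preprocessor is found
		if preproc := getPreProcessor(blockType); preproc != nil {
			// save blockBody through the preprocessor; errors are collected under a mutex in the real code
		}
	}(key, value)
}

for _, blockType := range keysInterimProcs {
	go func(blockType block.Type) {
		defer wg.Done()
		if interim := getInterimProcessor(blockType); interim != nil {
			_ = interim.SaveCurrentIntermediateTxToStorage()
		}
	}(blockType)
}

wg.Wait()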
errMutex.Lock() + errFound = err + errMutex.Unlock() + } - errMutex.Lock() - errFound = err - errMutex.Unlock() + wg.Done() + }(blockType) } + wg.Wait() + return errFound } // RestoreBlockDataFromStorage restores block data from storage to pool -func (tc *transactionCoordinator) RestoreBlockDataFromStorage(body block.Body) (int, map[int][][]byte, error) { +func (tc *transactionCoordinator) RestoreBlockDataFromStorage(body block.Body) (int, error) { separatedBodies := tc.separateBodyByType(body) var errFound error localMutex := sync.Mutex{} totalRestoredTx := 0 - restoredMbHashes := make(map[int][][]byte) wg := sync.WaitGroup{} wg.Add(len(separatedBodies)) for key, value := range separatedBodies { go func(blockType block.Type, blockBody block.Body) { - restoredMbs := make(map[int][]byte) - preproc := tc.getPreProcessor(blockType) if preproc == nil { wg.Done() return } - restoredTxs, restoredMbs, err := preproc.RestoreTxBlockIntoPools(blockBody, tc.miniBlockPool) + restoredTxs, err := preproc.RestoreTxBlockIntoPools(blockBody, tc.miniBlockPool) if err != nil { log.Debug(err.Error()) @@ -283,10 +287,6 @@ func (tc *transactionCoordinator) RestoreBlockDataFromStorage(body block.Body) ( localMutex.Lock() totalRestoredTx += restoredTxs - for shId, mbHash := range restoredMbs { - restoredMbHashes[shId] = append(restoredMbHashes[shId], mbHash) - } - localMutex.Unlock() wg.Done() @@ -295,7 +295,7 @@ func (tc *transactionCoordinator) RestoreBlockDataFromStorage(body block.Body) ( wg.Wait() - return totalRestoredTx, restoredMbHashes, errFound + return totalRestoredTx, errFound } // RemoveBlockDataFromPool deletes block data from pools @@ -337,10 +337,14 @@ func (tc *transactionCoordinator) RemoveBlockDataFromPool(body block.Body) error func (tc *transactionCoordinator) ProcessBlockTransaction( body block.Body, round uint64, - haveTime func() time.Duration, + timeRemaining func() time.Duration, ) error { - separatedBodies := tc.separateBodyByType(body) + haveTime := func() bool { + return timeRemaining() >= 0 + } + + separatedBodies := tc.separateBodyByType(body) // processing has to be done in order, as the order of different type of transactions over the same account is strict for _, blockType := range tc.keysTxPreProcs { if separatedBodies[blockType] == nil { @@ -365,6 +369,7 @@ func (tc *transactionCoordinator) ProcessBlockTransaction( // with destination of current shard func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe( hdr data.HeaderHandler, + processedMiniBlocksHashes map[string]struct{}, maxTxRemaining uint32, maxMbRemaining uint32, round uint64, @@ -372,7 +377,7 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe ) (block.MiniBlockSlice, uint32, bool) { miniBlocks := make(block.MiniBlockSlice, 0) nrTxAdded := uint32(0) - nrMBprocessed := 0 + nrMiniBlocksProcessed := 0 if hdr == nil || hdr.IsInterfaceNil() { return miniBlocks, nrTxAdded, true @@ -384,8 +389,9 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe break } - if hdr.GetMiniBlockProcessed([]byte(key)) { - nrMBprocessed++ + _, ok := processedMiniBlocksHashes[key] + if ok { + nrMiniBlocksProcessed++ continue } @@ -424,7 +430,7 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe // all txs processed, add to processed miniblocks miniBlocks = append(miniBlocks, miniBlock) nrTxAdded = nrTxAdded + uint32(len(miniBlock.TxHashes)) - nrMBprocessed++ + nrMiniBlocksProcessed++ mbOverFlow := uint32(len(miniBlocks)) >= 
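// Small sketch of the signature change in ProcessBlockTransaction: callers keep
// supplying a "time remaining" function and the coordinator derives from it the boolean
// haveTime predicate the preprocessors expect. The 200ms budget below is purely
// illustrative; only the standard time package is assumed.
deadline := time.Now().Add(200 * time.Millisecond)
timeRemaining := func() time.Duration { return time.Until(deadline) }

haveTime := func() bool {
	// A reading of exactly zero still counts as having time, hence ">= 0" rather than "> 0".
	return timeRemaining() >= 0
}
_ = haveTime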
maxMbRemaining if mbOverFlow { @@ -432,7 +438,7 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe } } - allMBsProcessed := nrMBprocessed == len(crossMiniBlockHashes) + allMBsProcessed := nrMiniBlocksProcessed == len(crossMiniBlockHashes) return miniBlocks, nrTxAdded, allMBsProcessed } @@ -444,43 +450,27 @@ func (tc *transactionCoordinator) CreateMbsAndProcessTransactionsFromMe( haveTime func() bool, ) block.MiniBlockSlice { - txPreProc := tc.getPreProcessor(block.TxBlock) - if txPreProc == nil || txPreProc.IsInterfaceNil() { - return nil - } - miniBlocks := make(block.MiniBlockSlice, 0) - txSpaceRemained := int(maxTxSpaceRemained) - - newMBAdded := true - for newMBAdded { - newMBAdded = false + for _, blockType := range tc.keysTxPreProcs { - for shardId := uint32(0); shardId < tc.shardCoordinator.NumberOfShards(); shardId++ { - if txSpaceRemained <= 0 { - break - } + txPreProc := tc.getPreProcessor(blockType) + if txPreProc == nil || txPreProc.IsInterfaceNil() { + return nil + } - mbSpaceRemained := int(maxMbSpaceRemained) - len(miniBlocks) - if mbSpaceRemained <= 0 { - break - } + mbs, err := txPreProc.CreateAndProcessMiniBlocks( + maxTxSpaceRemained, + maxMbSpaceRemained, + round, + haveTime, + ) - miniBlock, err := txPreProc.CreateAndProcessMiniBlock( - tc.shardCoordinator.SelfId(), - shardId, - txSpaceRemained, - haveTime, - round) - if err != nil { - continue - } + if err != nil { + log.Error(err.Error()) + } - if len(miniBlock.TxHashes) > 0 { - txSpaceRemained -= len(miniBlock.TxHashes) - miniBlocks = append(miniBlocks, miniBlock) - newMBAdded = true - } + if len(mbs) > 0 { + miniBlocks = append(miniBlocks, mbs...) } } @@ -497,6 +487,11 @@ func (tc *transactionCoordinator) processAddedInterimTransactions() block.MiniBl // processing has to be done in order, as the order of different type of transactions over the same account is strict for _, blockType := range tc.keysInterimProcs { + if blockType == block.RewardsBlock { + // this has to be processed last + continue + } + interimProc := tc.getInterimProcessor(blockType) if interimProc == nil { // this will never be reached as keysInterimProcs are the actual keys from the interimMap @@ -561,6 +556,8 @@ func createBroadcastTopic(shardC sharding.Coordinator, destShId uint32, mbType b baseTopic = factory.PeerChBodyTopic case block.SmartContractResultBlock: baseTopic = factory.UnsignedTransactionTopic + case block.RewardsBlock: + baseTopic = factory.RewardsTransactionTopic default: return "", process.ErrUnknownBlockType } @@ -694,7 +691,7 @@ func (tc *transactionCoordinator) processCompleteMiniBlock( snapshot := tc.accounts.JournalLen() err := preproc.ProcessMiniBlock(miniBlock, haveTime, round) if err != nil { - log.Debug(err.Error()) + log.Error(err.Error()) errAccountState := tc.accounts.RevertToSnapshot(snapshot) if errAccountState != nil { // TODO: evaluate if reloading the trie from disk will might solve the problem @@ -710,14 +707,20 @@ func (tc *transactionCoordinator) processCompleteMiniBlock( // VerifyCreatedBlockTransactions checks whether the created transactions are the same as the one proposed func (tc *transactionCoordinator) VerifyCreatedBlockTransactions(body block.Body) error { tc.mutInterimProcessors.RLock() - + defer tc.mutInterimProcessors.RUnlock() errMutex := sync.Mutex{} var errFound error - + // TODO: think if it is good in parallel or it is needed in sequences wg := sync.WaitGroup{} wg.Add(len(tc.interimProcessors)) - for _, interimProc := range tc.interimProcessors { + for key, 
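// Illustration of the new processedMiniBlocksHashes parameter of
// CreateMbsAndProcessCrossShardTransactionsDstMe: a plain map used as a set of
// already-processed miniblock hashes. The hash literals are placeholders.
processedMiniBlocksHashes := map[string]struct{}{
	"miniblock hash 1": {},
	"miniblock hash 2": {},
}

// Membership test, as performed inside the loop above:
if _, ok := processedMiniBlocksHashes["miniblock hash 1"]; ok {
	// already processed in a previous round, so it is counted and skipped
}

// Passing nil (as several updated tests do) is also safe: a lookup in a nil map simply
// returns ok == false, so nothing is treated as already processed.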
interimProc := range tc.interimProcessors { + if key == block.RewardsBlock { + // this has to be processed last + wg.Done() + continue + } + go func(intermediateProcessor process.IntermediateTransactionHandler) { err := intermediateProcessor.VerifyInterMiniBlocks(body) if err != nil { @@ -730,9 +733,17 @@ func (tc *transactionCoordinator) VerifyCreatedBlockTransactions(body block.Body } wg.Wait() - tc.mutInterimProcessors.RUnlock() - return errFound + if errFound != nil { + return errFound + } + + interimProc := tc.getInterimProcessor(block.RewardsBlock) + if interimProc == nil { + return nil + } + + return interimProc.VerifyInterMiniBlocks(body) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/process/coordinator/process_test.go b/process/coordinator/process_test.go index 83b69dfee57..3829e1a94d4 100644 --- a/process/coordinator/process_test.go +++ b/process/coordinator/process_test.go @@ -14,12 +14,14 @@ import ( "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/shardedData" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/factory/shard" "github.com/ElrondNetwork/elrond-go/process/mock" @@ -29,68 +31,69 @@ import ( "github.com/stretchr/testify/assert" ) -func initDataPool(testHash []byte) *mock.PoolsHolderStub { - sdp := &mock.PoolsHolderStub{ - TransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &transaction.Transaction{Nonce: 10, Data: id}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2"), testHash} - }, - LenCalled: func() int { - return 0 - }, - } - }, - RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &transaction.Transaction{Nonce: 10}, true - } - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } +func FeeHandlerMock() *mock.FeeHandlerStub { + return &mock.FeeHandlerStub{ + MinGasPriceCalled: func() uint64 { + return 0 }, - UnsignedTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { - return &mock.ShardedDataStub{ - RegisterHandlerCalled: func(i func(key []byte)) {}, - ShardDataStoreCalled: func(id string) (c storage.Cacher) { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &smartContractResult.SmartContractResult{Nonce: 10, SndAddr: []byte("0"), RcvAddr: []byte("1")}, true - } - return nil, false - }, - KeysCalled: func() [][]byte { - return [][]byte{[]byte("key1"), []byte("key2")} - }, - LenCalled: func() int { - return 0 - }, - } - }, - 
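// Condensed sketch of the verification order introduced in VerifyCreatedBlockTransactions:
// every interim processor except the rewards one is verified concurrently, then the
// rewards interim processor is verified last, on the calling goroutine. interimProcessors
// stands in for the coordinator's map; error collection is simplified here.
var wg sync.WaitGroup
for key, proc := range interimProcessors {
	if key == block.RewardsBlock {
		continue // deliberately deferred until after the parallel group below
	}
	wg.Add(1)
	go func(p process.IntermediateTransactionHandler) {
		defer wg.Done()
		_ = p.VerifyInterMiniBlocks(body) // errors gathered under a mutex in the real code
	}(proc)
}
wg.Wait()

if rewardsProc, ok := interimProcessors[block.RewardsBlock]; ok {
	err := rewardsProc.VerifyInterMiniBlocks(body)
	_ = err // returned to the caller in the real code
}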
RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, - SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { - if reflect.DeepEqual(key, testHash) { - return &smartContractResult.SmartContractResult{Nonce: 10, SndAddr: []byte("0"), RcvAddr: []byte("1")}, true - } - return nil, false - }, - AddDataCalled: func(key []byte, data interface{}, cacheId string) { - }, - } + MinGasLimitCalled: func() uint64 { + return 5 + }, + MinTxFeeCalled: func() uint64 { + return 0 }, + } +} + +func createShardedDataChacherNotifier( + handler data.TransactionHandler, + testHash []byte, +) func() dataRetriever.ShardedDataCacherNotifier { + return func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) {}, + ShardDataStoreCalled: func(id string) (c storage.Cacher) { + return &mock.CacherStub{ + PeekCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, testHash) { + return handler, true + } + return nil, false + }, + KeysCalled: func() [][]byte { + return [][]byte{[]byte("key1"), []byte("key2")} + }, + LenCalled: func() int { + return 0 + }, + } + }, + RemoveSetOfDataFromPoolCalled: func(keys [][]byte, id string) {}, + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + if reflect.DeepEqual(key, []byte("tx1_hash")) { + return handler, true + } + return nil, false + }, + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + }, + } + } +} + +func initDataPool(testHash []byte) *mock.PoolsHolderStub { + tx := &transaction.Transaction{Nonce: 10} + sc := &smartContractResult.SmartContractResult{Nonce: 10, SndAddr: []byte("0"), RcvAddr: []byte("1")} + rTx := &rewardTx.RewardTx{Epoch: 0, Round: 1, RcvAddr: []byte("1")} + + txCalled := createShardedDataChacherNotifier(tx, testHash) + unsignedTxHandler := createShardedDataChacherNotifier(sc, testHash) + rewardTxCalled := createShardedDataChacherNotifier(rTx, testHash) + + sdp := &mock.PoolsHolderStub{ + TransactionsCalled: txCalled, + UnsignedTransactionsCalled: unsignedTxHandler, + RewardTransactionsCalled: rewardTxCalled, HeadersNoncesCalled: func() dataRetriever.Uint64SyncMapCacher { return &mock.Uint64SyncMapCacherStub{ MergeCalled: func(u uint64, hashMap dataRetriever.ShardIdHashMap) {}, @@ -156,6 +159,7 @@ func initDataPool(testHash []byte) *mock.PoolsHolderStub { } return sdp } + func containsHash(txHashes [][]byte, hash []byte) bool { for _, txHash := range txHashes { if bytes.Equal(hash, txHash) { @@ -208,7 +212,7 @@ func TestNewTransactionCoordinator_NilShardCoordinator(t *testing.T) { tc, err := NewTransactionCoordinator( nil, &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, @@ -224,7 +228,7 @@ func TestNewTransactionCoordinator_NilAccountsStub(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), nil, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, @@ -256,7 +260,7 @@ func TestNewTransactionCoordinator_NilRequestHandler(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), nil, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, @@ -272,7 +276,7 @@ func 
TestNewTransactionCoordinator_NilHasher(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, nil, &mock.InterimProcessorContainerMock{}, @@ -288,7 +292,7 @@ func TestNewTransactionCoordinator_NilMarshalizer(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, &mock.PreProcessorContainerMock{}, nil, @@ -304,7 +308,7 @@ func TestNewTransactionCoordinator_OK(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, @@ -320,7 +324,7 @@ func TestTransactionCoordinator_SeparateBodyNil(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, @@ -338,7 +342,7 @@ func TestTransactionCoordinator_SeparateBody(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, &mock.PreProcessorContainerMock{}, &mock.InterimProcessorContainerMock{}, @@ -378,6 +382,9 @@ func createPreProcessorContainer() process.PreProcessorsContainer { }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + FeeHandlerMock(), ) container, _ := preFactory.Create() @@ -385,12 +392,16 @@ func createPreProcessorContainer() process.PreProcessorsContainer { } func createInterimProcessorContainer() process.IntermediateProcessorContainer { + economicsData := &economics.EconomicsData{} preFactory, _ := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(5), &mock.MarshalizerMock{}, &mock.HasherMock{}, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, initStore(), + initDataPool([]byte("test_hash1")), + economicsData, ) container, _ := preFactory.Create() @@ -414,6 +425,9 @@ func createPreProcessorContainerWithDataPool(dataPool dataRetriever.PoolsHolder) }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + FeeHandlerMock(), ) container, _ := preFactory.Create() @@ -426,7 +440,7 @@ func TestTransactionCoordinator_CreateBlockStarted(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, @@ -450,7 +464,7 @@ func TestTransactionCoordinator_CreateMarshalizedDataNilBody(t *testing.T) { tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, @@ -489,7 +503,7 @@ func TestTransactionCoordinator_CreateMarshalizedData(t *testing.T) { tc, err := 
NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, @@ -510,7 +524,7 @@ func TestTransactionCoordinator_CreateMarshalizedDataWithTxsAndScr(t *testing.T) tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), interimContainer, @@ -562,7 +576,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsDstMeNi tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, @@ -575,7 +589,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsDstMeNi haveTime := func() bool { return true } - mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(nil, maxTxRemaining, maxMbRemaining, 10, haveTime) + mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(nil, nil, maxTxRemaining, maxMbRemaining, 10, haveTime) assert.Equal(t, 0, len(mbs)) assert.Equal(t, uint32(0), txs) @@ -610,7 +624,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsDstMeNo tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, @@ -623,7 +637,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsDstMeNo haveTime := func() bool { return false } - mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(createTestMetablock(), maxTxRemaining, maxMbRemaining, 10, haveTime) + mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(createTestMetablock(), nil, maxTxRemaining, maxMbRemaining, 10, haveTime) assert.Equal(t, 0, len(mbs)) assert.Equal(t, uint32(0), txs) @@ -636,7 +650,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsNothing tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, createPreProcessorContainer(), &mock.InterimProcessorContainerMock{}, @@ -649,7 +663,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactionsNothing haveTime := func() bool { return true } - mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(createTestMetablock(), maxTxRemaining, maxMbRemaining, 10, haveTime) + mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(createTestMetablock(), nil, maxTxRemaining, maxMbRemaining, 10, haveTime) assert.Equal(t, 0, len(mbs)) assert.Equal(t, uint32(0), txs) @@ -683,6 +697,9 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactions(t *tes }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + FeeHandlerMock(), ) container, _ := preFactory.Create() @@ -712,7 +729,7 @@ func TestTransactionCoordinator_CreateMbsAndProcessCrossShardTransactions(t *tes } } - mbs, txs, finalized := 
tc.CreateMbsAndProcessCrossShardTransactionsDstMe(metaHdr, maxTxRemaining, maxMbRemaining, 10, haveTime) + mbs, txs, finalized := tc.CreateMbsAndProcessCrossShardTransactionsDstMe(metaHdr, nil, maxTxRemaining, maxMbRemaining, 10, haveTime) assert.Equal(t, 1, len(mbs)) assert.Equal(t, uint32(1), txs) @@ -757,6 +774,9 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNothingToPr UnsignedTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { return shardedCacheMock }, + RewardTransactionsCalled: func() dataRetriever.ShardedDataCacherNotifier { + return shardedCacheMock + }, }, &mock.AddressConverterMock{}, &mock.AccountsStub{}, @@ -768,13 +788,16 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeNothingToPr }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + FeeHandlerMock(), ) container, _ := preFactory.Create() tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(5), &mock.AccountsStub{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.RequestHandlerMock{}, container, &mock.InterimProcessorContainerMock{}, @@ -886,12 +909,12 @@ func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMe(t *testing func TestTransactionCoordinator_CreateMbsAndProcessTransactionsFromMeMultipleMiniblocks(t *testing.T) { t.Parallel() - txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache}) + nrShards := uint32(5) + txPool, _ := shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100000, Type: storageUnit.LRUCache, Shards: nrShards}) tdp := initDataPool([]byte("tx_hash1")) tdp.TransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return txPool } - nrShards := uint32(5) tc, err := NewTransactionCoordinator( mock.NewMultiShardsCoordinatorMock(nrShards), @@ -1074,7 +1097,7 @@ func TestTransactionCoordinator_receivedMiniBlockRequestTxs(t *testing.T) { hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() //we will have a miniblock that will have 3 tx hashes //1 tx hash will be in cache @@ -1130,6 +1153,9 @@ func TestTransactionCoordinator_receivedMiniBlockRequestTxs(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + FeeHandlerMock(), ) container, _ := preFactory.Create() @@ -1204,10 +1230,9 @@ func TestTransactionCoordinator_RestoreBlockDataFromStorage(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, tc) - nrTxs, mbs, err := tc.RestoreBlockDataFromStorage(nil) + nrTxs, err := tc.RestoreBlockDataFromStorage(nil) assert.Nil(t, err) assert.Equal(t, 0, nrTxs) - assert.Equal(t, 0, len(mbs)) body := block.Body{} miniBlock := &block.MiniBlock{SenderShardID: 1, ReceiverShardID: 0, Type: block.TxBlock, TxHashes: [][]byte{txHash}} @@ -1216,9 +1241,8 @@ func TestTransactionCoordinator_RestoreBlockDataFromStorage(t *testing.T) { tc.RequestBlockTransactions(body) err = tc.SaveBlockDataToStorage(body) assert.Nil(t, err) - nrTxs, mbs, err = tc.RestoreBlockDataFromStorage(body) + nrTxs, err = tc.RestoreBlockDataFromStorage(body) assert.Equal(t, 1, nrTxs) - assert.Equal(t, 1, len(mbs)) assert.Nil(t, err) txHashToAsk := []byte("tx_hashnotinPool") @@ -1228,9 +1252,8 @@ func TestTransactionCoordinator_RestoreBlockDataFromStorage(t 
*testing.T) { err = tc.SaveBlockDataToStorage(body) assert.Equal(t, process.ErrMissingTransaction, err) - nrTxs, mbs, err = tc.RestoreBlockDataFromStorage(body) + nrTxs, err = tc.RestoreBlockDataFromStorage(body) assert.Equal(t, 1, nrTxs) - assert.Equal(t, 1, len(mbs)) assert.NotNil(t, err) } @@ -1285,6 +1308,9 @@ func TestTransactionCoordinator_ProcessBlockTransactionProcessTxError(t *testing }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + FeeHandlerMock(), ) container, _ := preFactory.Create() @@ -1403,6 +1429,9 @@ func TestTransactionCoordinator_RequestMiniblocks(t *testing.T) { }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + FeeHandlerMock(), ) container, _ := preFactory.Create() @@ -1438,7 +1467,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNot hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() //we will have a miniblock that will have 3 tx hashes //all txs will be in datapool and none of them will return err when processed @@ -1516,6 +1545,9 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithOkTxsShouldExecuteThemAndNot }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + FeeHandlerMock(), ) container, _ := preFactory.Create() @@ -1547,7 +1579,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR hasher := mock.HasherMock{} marshalizer := &mock.MarshalizerMock{} - dataPool := mock.NewPoolsHolderFake() + dataPool := mock.NewPoolsHolderMock() //we will have a miniblock that will have 3 tx hashes //all txs will be in datapool and none of them will return err when processed @@ -1620,6 +1652,9 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR }, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + FeeHandlerMock(), ) container, _ := preFactory.Create() @@ -1647,6 +1682,7 @@ func TestShardProcessor_ProcessMiniBlockCompleteWithErrorWhileProcessShouldCallR func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testing.T) { t.Parallel() + economicsData := &economics.EconomicsData{} txHash := []byte("txHash") tdp := initDataPool(txHash) shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) @@ -1656,7 +1692,10 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testi &mock.MarshalizerMock{}, &mock.HasherMock{}, adrConv, + &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + tdp, + economicsData, ) container, _ := preFactory.Create() @@ -1690,6 +1729,7 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsNilOrMiss(t *testi func TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { t.Parallel() + economicsData := &economics.EconomicsData{} txHash := []byte("txHash") tdp := initDataPool(txHash) shardCoordinator := mock.NewMultiShardsCoordinatorMock(5) @@ -1699,7 +1739,10 @@ func TestTransactionCoordinator_VerifyCreatedBlockTransactionsOk(t *testing.T) { &mock.MarshalizerMock{}, &mock.HasherMock{}, adrConv, + &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + tdp, + economicsData, ) container, _ := 
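// Caller-side view of the slimmed-down RestoreBlockDataFromStorage signature: the
// restored-miniblock-hashes map is gone, so only the transaction count and the error
// remain. tc and body are assumed to be in scope, as in the tests above.
nrTxs, err := tc.RestoreBlockDataFromStorage(body)
if err != nil {
	// some transactions could not be restored; nrTxs still reports how many were, as the
	// updated test expects (nrTxs == 1 alongside a non-nil error)
}
_ = nrTxs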
preFactory.Create() diff --git a/process/coordinator/transactionType.go b/process/coordinator/transactionType.go index b523d7b6dca..933cb6a353a 100644 --- a/process/coordinator/transactionType.go +++ b/process/coordinator/transactionType.go @@ -4,6 +4,7 @@ import ( "bytes" "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" @@ -47,6 +48,11 @@ func (tc *txTypeHandler) ComputeTransactionType(tx data.TransactionHandler) (pro return process.InvalidTransaction, err } + _, isRewardTx := tx.(*rewardTx.RewardTx) + if isRewardTx { + return process.RewardTx, nil + } + isEmptyAddress := tc.isDestAddressEmpty(tx) if isEmptyAddress { if len(tx.GetData()) > 0 { diff --git a/process/coordinator/transactionType_test.go b/process/coordinator/transactionType_test.go new file mode 100644 index 00000000000..af177bf6cfb --- /dev/null +++ b/process/coordinator/transactionType_test.go @@ -0,0 +1,268 @@ +package coordinator + +import ( + "crypto/rand" + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/data/transaction" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/stretchr/testify/assert" +) + +func TestNewTxTypeHandler_NilAddrConv(t *testing.T) { + t.Parallel() + + tth, err := NewTxTypeHandler( + nil, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + ) + + assert.Nil(t, tth) + assert.Equal(t, process.ErrNilAddressConverter, err) +} + +func TestNewTxTypeHandler_NilShardCoord(t *testing.T) { + t.Parallel() + + tth, err := NewTxTypeHandler( + &mock.AddressConverterMock{}, + nil, + &mock.AccountsStub{}, + ) + + assert.Nil(t, tth) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewTxTypeHandler_NilAccounts(t *testing.T) { + t.Parallel() + + tth, err := NewTxTypeHandler( + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + nil, + ) + + assert.Nil(t, tth) + assert.Equal(t, process.ErrNilAccountsAdapter, err) +} + +func TestNewTxTypeHandler_ValsOk(t *testing.T) { + t.Parallel() + + tth, err := NewTxTypeHandler( + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) +} + +func generateRandomByteSlice(size int) []byte { + buff := make([]byte, size) + _, _ = rand.Reader.Read(buff) + + return buff +} + +func createAccounts(tx *transaction.Transaction) (state.AccountHandler, state.AccountHandler) { + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + acntSrc, _ := state.NewAccount(mock.NewAddressMock(tx.SndAddr), tracker) + acntSrc.Balance = acntSrc.Balance.Add(acntSrc.Balance, tx.Value) + totalFee := big.NewInt(0) + totalFee = totalFee.Mul(big.NewInt(int64(tx.GasLimit)), big.NewInt(int64(tx.GasPrice))) + acntSrc.Balance = acntSrc.Balance.Add(acntSrc.Balance, totalFee) + + acntDst, _ := state.NewAccount(mock.NewAddressMock(tx.RcvAddr), tracker) + + return acntSrc, acntDst +} + +func TestTxTypeHandler_ComputeTransactionTypeNil(t *testing.T) { + t.Parallel() + + tth, err := 
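// Illustrative call showing the new reward-transaction branch in ComputeTransactionType:
// a *rewardTx.RewardTx is recognised by type assertion before the SC-deployment and
// SC-invocation checks and mapped to the new process.RewardTx type. txTypeHandler and
// leaderAddress are assumed to exist; as exercised by
// TestTxTypeHandler_ComputeTransactionTypeRewardTx below, a receiver address that does
// not match the converter's address length yields ErrWrongTransaction instead.
txType, err := txTypeHandler.ComputeTransactionType(&rewardTx.RewardTx{
	Round:   1,
	Epoch:   0,
	RcvAddr: leaderAddress,
})
// For a well-formed reward transaction: err == nil and txType == process.RewardTx.
_ = txType
_ = err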
NewTxTypeHandler( + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + _, err = tth.ComputeTransactionType(nil) + assert.Equal(t, process.ErrNilTransaction, err) +} + +func TestTxTypeHandler_ComputeTransactionTypeNilTx(t *testing.T) { + t.Parallel() + + tth, err := NewTxTypeHandler( + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + tx := &transaction.Transaction{} + tx.Nonce = 0 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = []byte("DST") + tx.Value = big.NewInt(45) + + tx = nil + _, err = tth.ComputeTransactionType(tx) + assert.Equal(t, process.ErrNilTransaction, err) +} + +func TestTxTypeHandler_ComputeTransactionTypeErrWrongTransaction(t *testing.T) { + t.Parallel() + + tth, err := NewTxTypeHandler( + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + tx := &transaction.Transaction{} + tx.Nonce = 0 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = nil + tx.Value = big.NewInt(45) + + _, err = tth.ComputeTransactionType(tx) + assert.Equal(t, process.ErrWrongTransaction, err) +} + +func TestTxTypeHandler_ComputeTransactionTypeScDeployment(t *testing.T) { + t.Parallel() + + addressConverter := &mock.AddressConverterMock{} + tth, err := NewTxTypeHandler( + addressConverter, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + tx := &transaction.Transaction{} + tx.Nonce = 0 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = make([]byte, addressConverter.AddressLen()) + tx.Data = "data" + tx.Value = big.NewInt(45) + + txType, err := tth.ComputeTransactionType(tx) + assert.Nil(t, err) + assert.Equal(t, process.SCDeployment, txType) +} + +func TestTxTypeHandler_ComputeTransactionTypeScInvoking(t *testing.T) { + t.Parallel() + + addrConverter := &mock.AddressConverterMock{} + tx := &transaction.Transaction{} + tx.Nonce = 0 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = generateRandomByteSlice(addrConverter.AddressLen()) + tx.Data = "data" + tx.Value = big.NewInt(45) + + _, acntDst := createAccounts(tx) + acntDst.SetCode([]byte("code")) + + addressConverter := &mock.AddressConverterMock{} + tth, err := NewTxTypeHandler( + addressConverter, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { + return acntDst, nil + }}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + txType, err := tth.ComputeTransactionType(tx) + assert.Nil(t, err) + assert.Equal(t, process.SCInvoking, txType) +} + +func TestTxTypeHandler_ComputeTransactionTypeMoveBalance(t *testing.T) { + t.Parallel() + + addrConverter := &mock.AddressConverterMock{} + tx := &transaction.Transaction{} + tx.Nonce = 0 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = generateRandomByteSlice(addrConverter.AddressLen()) + tx.Data = "data" + tx.Value = big.NewInt(45) + + _, acntDst := createAccounts(tx) + + addressConverter := &mock.AddressConverterMock{} + tth, err := NewTxTypeHandler( + addressConverter, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { + return acntDst, nil + }}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + txType, err := 
tth.ComputeTransactionType(tx) + assert.Nil(t, err) + assert.Equal(t, process.MoveBalance, txType) +} + +func TestTxTypeHandler_ComputeTransactionTypeRewardTx(t *testing.T) { + t.Parallel() + + addrConv := &mock.AddressConverterMock{} + tth, err := NewTxTypeHandler( + addrConv, + mock.NewMultiShardsCoordinatorMock(3), + &mock.AccountsStub{}, + ) + + assert.NotNil(t, tth) + assert.Nil(t, err) + + tx := &rewardTx.RewardTx{RcvAddr: []byte("leader")} + txType, err := tth.ComputeTransactionType(tx) + assert.Equal(t, process.ErrWrongTransaction, err) + assert.Equal(t, process.InvalidTransaction, txType) + + tx = &rewardTx.RewardTx{RcvAddr: generateRandomByteSlice(addrConv.AddressLen())} + txType, err = tth.ComputeTransactionType(tx) + assert.Nil(t, err) + assert.Equal(t, process.RewardTx, txType) +} diff --git a/process/dataValidators/txValidator.go b/process/dataValidators/txValidator.go index 18dcb188133..0de9e5a59a6 100644 --- a/process/dataValidators/txValidator.go +++ b/process/dataValidators/txValidator.go @@ -14,13 +14,19 @@ var log = logger.DefaultLogger() // TxValidator represents a tx handler validator that doesn't check the validity of provided txHandler type TxValidator struct { - accounts state.AccountsAdapter - shardCoordinator sharding.Coordinator - rejectedTxs uint64 + accounts state.AccountsAdapter + shardCoordinator sharding.Coordinator + rejectedTxs uint64 + maxNonceDeltaAllowed int } // NewTxValidator creates a new nil tx handler validator instance -func NewTxValidator(accounts state.AccountsAdapter, shardCoordinator sharding.Coordinator) (*TxValidator, error) { +func NewTxValidator( + accounts state.AccountsAdapter, + shardCoordinator sharding.Coordinator, + maxNonceDeltaAllowed int, +) (*TxValidator, error) { + if accounts == nil || accounts.IsInterfaceNil() { return nil, process.ErrNilAccountsAdapter } @@ -29,9 +35,10 @@ func NewTxValidator(accounts state.AccountsAdapter, shardCoordinator sharding.Co } return &TxValidator{ - accounts: accounts, - shardCoordinator: shardCoordinator, - rejectedTxs: uint64(0), + accounts: accounts, + shardCoordinator: shardCoordinator, + rejectedTxs: uint64(0), + maxNonceDeltaAllowed: maxNonceDeltaAllowed, }, nil } @@ -55,7 +62,9 @@ func (tv *TxValidator) IsTxValidForProcessing(interceptedTx process.TxValidatorH accountNonce := accountHandler.GetNonce() txNonce := interceptedTx.Nonce() lowerNonceInTx := txNonce < accountNonce - if lowerNonceInTx { + veryHighNonceInTx := txNonce > accountNonce+uint64(tv.maxNonceDeltaAllowed) + isTxRejected := lowerNonceInTx || veryHighNonceInTx + if isTxRejected { tv.rejectedTxs++ return false } diff --git a/process/dataValidators/txValidator_test.go b/process/dataValidators/txValidator_test.go index de97fe258da..e6f64a75343 100644 --- a/process/dataValidators/txValidator_test.go +++ b/process/dataValidators/txValidator_test.go @@ -59,7 +59,8 @@ func TestTxValidator_NewValidatorNilAccountsShouldErr(t *testing.T) { t.Parallel() shardCoordinator := createMockCoordinator("_", 0) - txValidator, err := dataValidators.NewTxValidator(nil, shardCoordinator) + maxNonceDeltaAllowed := 100 + txValidator, err := dataValidators.NewTxValidator(nil, shardCoordinator, maxNonceDeltaAllowed) assert.Nil(t, txValidator) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -69,7 +70,8 @@ func TestTxValidator_NewValidatorNilShardCoordinatorShouldErr(t *testing.T) { t.Parallel() accounts := getAccAdapter(0, big.NewInt(0)) - txValidator, err := dataValidators.NewTxValidator(accounts, nil) + maxNonceDeltaAllowed := 100 + 
txValidator, err := dataValidators.NewTxValidator(accounts, nil, maxNonceDeltaAllowed) assert.Nil(t, txValidator) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -80,7 +82,8 @@ func TestTxValidator_NewValidatorShouldWork(t *testing.T) { accounts := getAccAdapter(0, big.NewInt(0)) shardCoordinator := createMockCoordinator("_", 0) - txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator) + maxNonceDeltaAllowed := 100 + txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator, maxNonceDeltaAllowed) assert.Nil(t, err) assert.NotNil(t, txValidator) @@ -94,7 +97,8 @@ func TestTxValidator_IsTxValidForProcessingTxIsCrossShardShouldReturnTrue(t *tes accounts := getAccAdapter(1, big.NewInt(0)) shardCoordinator := createMockCoordinator("_", 0) - txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator) + maxNonceDeltaAllowed := 100 + txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator, maxNonceDeltaAllowed) assert.Nil(t, err) addressMock := mock.NewAddressMock([]byte("address")) @@ -112,7 +116,27 @@ func TestTxValidator_IsTxValidForProcessingAccountNonceIsGreaterThanTxNonceShoul accounts := getAccAdapter(accountNonce, big.NewInt(0)) shardCoordinator := createMockCoordinator("_", 0) - txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator) + maxNonceDeltaAllowed := 100 + txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator, maxNonceDeltaAllowed) + assert.Nil(t, err) + + addressMock := mock.NewAddressMock([]byte("address")) + txValidatorHandler := getTxValidatorHandler(0, txNonce, addressMock, big.NewInt(0)) + + result := txValidator.IsTxValidForProcessing(txValidatorHandler) + assert.Equal(t, false, result) +} + +func TestTxValidator_IsTxValidForProcessingTxNonceIsTooHigh(t *testing.T) { + t.Parallel() + + accountNonce := uint64(100) + maxNonceDeltaAllowed := 100 + txNonce := accountNonce + uint64(maxNonceDeltaAllowed) + 1 + + accounts := getAccAdapter(accountNonce, big.NewInt(0)) + shardCoordinator := createMockCoordinator("_", 0) + txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator, maxNonceDeltaAllowed) assert.Nil(t, err) addressMock := mock.NewAddressMock([]byte("address")) @@ -132,7 +156,8 @@ func TestTxValidator_IsTxValidForProcessingAccountBalanceIsLessThanTxTotalValueS accounts := getAccAdapter(accountNonce, accountBalance) shardCoordinator := createMockCoordinator("_", 0) - txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator) + maxNonceDeltaAllowed := 100 + txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator, maxNonceDeltaAllowed) assert.Nil(t, err) addressMock := mock.NewAddressMock([]byte("address")) @@ -152,7 +177,8 @@ func TestTxValidator_IsTxValidForProcessingNumOfRejectedTxShouldIncreaseShouldRe accounts := getAccAdapter(accountNonce, accountBalance) shardCoordinator := createMockCoordinator("_", 0) - txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator) + maxNonceDeltaAllowed := 100 + txValidator, err := dataValidators.NewTxValidator(accounts, shardCoordinator, maxNonceDeltaAllowed) assert.Nil(t, err) addressMock := mock.NewAddressMock([]byte("address")) @@ -173,7 +199,8 @@ func TestTxValidator_IsTxValidForProcessingAccountNotExitsShouldReturnFalse(t *t return nil, errors.New("cannot find account") } shardCoordinator := createMockCoordinator("_", 0) - txValidator, _ := dataValidators.NewTxValidator(accDB, shardCoordinator) + maxNonceDeltaAllowed := 
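// Worked example of the nonce window enforced by the updated TxValidator (the numbers
// are illustrative): with the sender account at nonce 100 and maxNonceDeltaAllowed = 100,
// only transactions with 100 <= txNonce <= 200 pass.
accountNonce := uint64(100)
maxNonceDeltaAllowed := 100

isRejected := func(txNonce uint64) bool {
	lowerNonceInTx := txNonce < accountNonce
	veryHighNonceInTx := txNonce > accountNonce+uint64(maxNonceDeltaAllowed)
	return lowerNonceInTx || veryHighNonceInTx
}

// isRejected(99)  == true  (below the account nonce)
// isRejected(100) == false (accepted)
// isRejected(200) == false (accepted, exactly at the upper bound)
// isRejected(201) == true  (as in TestTxValidator_IsTxValidForProcessingTxNonceIsTooHigh above)
_ = isRejected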
100 + txValidator, _ := dataValidators.NewTxValidator(accDB, shardCoordinator, maxNonceDeltaAllowed) addressMock := mock.NewAddressMock([]byte("address")) txValidatorHandler := getTxValidatorHandler(0, 1, addressMock, big.NewInt(0)) @@ -190,7 +217,8 @@ func TestTxValidator_IsTxValidForProcessingWrongAccountTypeShouldReturnFalse(t * return &state.MetaAccount{}, nil } shardCoordinator := createMockCoordinator("_", 0) - txValidator, _ := dataValidators.NewTxValidator(accDB, shardCoordinator) + maxNonceDeltaAllowed := 100 + txValidator, _ := dataValidators.NewTxValidator(accDB, shardCoordinator, maxNonceDeltaAllowed) addressMock := mock.NewAddressMock([]byte("address")) txValidatorHandler := getTxValidatorHandler(0, 1, addressMock, big.NewInt(0)) @@ -206,7 +234,8 @@ func TestTxValidator_IsTxValidForProcessingTxIsOkShouldReturnTrue(t *testing.T) accountBalance := big.NewInt(10) accounts := getAccAdapter(accountNonce, accountBalance) shardCoordinator := createMockCoordinator("_", 0) - txValidator, _ := dataValidators.NewTxValidator(accounts, shardCoordinator) + maxNonceDeltaAllowed := 100 + txValidator, _ := dataValidators.NewTxValidator(accounts, shardCoordinator, maxNonceDeltaAllowed) addressMock := mock.NewAddressMock([]byte("address")) txValidatorHandler := getTxValidatorHandler(0, 1, addressMock, big.NewInt(0)) diff --git a/process/economics/economicsData.go b/process/economics/economicsData.go new file mode 100644 index 00000000000..fb796a8e2f7 --- /dev/null +++ b/process/economics/economicsData.go @@ -0,0 +1,152 @@ +package economics + +import ( + "math" + "math/big" + "strconv" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/process" +) + +// EconomicsData will store information about economics +type EconomicsData struct { + rewardsValue *big.Int + communityPercentage float64 + leaderPercentage float64 + burnPercentage float64 + minGasPrice uint64 + minGasLimit uint64 + communityAddress string + burnAddress string +} + +const float64EqualityThreshold = 1e-9 + +// NewEconomicsData will create and object with information about economics parameters +func NewEconomicsData(economics *config.ConfigEconomics) (*EconomicsData, error) { + //TODO check what happens if addresses are wrong + rewardsValue, minGasPrice, minGasLimit, err := convertValues(economics) + if err != nil { + return nil, err + } + + notGreaterThanZero := rewardsValue.Cmp(big.NewInt(0)) + if notGreaterThanZero < 0 { + return nil, process.ErrInvalidRewardsValue + } + + err = checkValues(economics) + if err != nil { + return nil, err + } + + return &EconomicsData{ + rewardsValue: rewardsValue, + communityPercentage: economics.RewardsSettings.CommunityPercentage, + leaderPercentage: economics.RewardsSettings.LeaderPercentage, + burnPercentage: economics.RewardsSettings.BurnPercentage, + minGasPrice: minGasPrice, + minGasLimit: minGasLimit, + communityAddress: economics.EconomicsAddresses.CommunityAddress, + burnAddress: economics.EconomicsAddresses.BurnAddress, + }, nil +} + +func convertValues(economics *config.ConfigEconomics) (*big.Int, uint64, uint64, error) { + conversionBase := 10 + bitConversionSize := 64 + + rewardsValue := new(big.Int) + rewardsValue, ok := rewardsValue.SetString(economics.RewardsSettings.RewardsValue, conversionBase) + if !ok { + return nil, 0, 0, process.ErrInvalidRewardsValue + } + + minGasPrice, err := strconv.ParseUint(economics.FeeSettings.MinGasPrice, conversionBase, bitConversionSize) + if err != nil { + return nil, 0, 0, process.ErrInvalidMinimumGasPrice + } 
+ + minGasLimit, err := strconv.ParseUint(economics.FeeSettings.MinGasLimit, conversionBase, bitConversionSize) + if err != nil { + return nil, 0, 0, process.ErrInvalidMinimumGasLimitForTx + } + + return rewardsValue, minGasPrice, minGasLimit, nil +} + +func checkValues(economics *config.ConfigEconomics) error { + if isPercentageInvalid(economics.RewardsSettings.BurnPercentage) || + isPercentageInvalid(economics.RewardsSettings.CommunityPercentage) || + isPercentageInvalid(economics.RewardsSettings.LeaderPercentage) { + return process.ErrInvalidRewardsPercentages + } + + sumPercentage := economics.RewardsSettings.BurnPercentage + sumPercentage += economics.RewardsSettings.CommunityPercentage + sumPercentage += economics.RewardsSettings.LeaderPercentage + isEqualsToOne := math.Abs(sumPercentage-1.0) <= float64EqualityThreshold + if !isEqualsToOne { + return process.ErrInvalidRewardsPercentages + } + + return nil +} + +func isPercentageInvalid(percentage float64) bool { + isLessThanZero := percentage < 0.0 + isGreaterThanOne := percentage > 1.0 + if isLessThanZero || isGreaterThanOne { + return true + } + return false +} + +// RewardsValue will return rewards value +func (ed *EconomicsData) RewardsValue() *big.Int { + return ed.rewardsValue +} + +// CommunityPercentage will return community reward percentage +func (ed *EconomicsData) CommunityPercentage() float64 { + return ed.communityPercentage +} + +// LeaderPercentage will return leader reward percentage +func (ed *EconomicsData) LeaderPercentage() float64 { + return ed.leaderPercentage +} + +// BurnPercentage will return burn percentage +func (ed *EconomicsData) BurnPercentage() float64 { + return ed.burnPercentage +} + +// MinGasPrice will return minimum gas price +func (ed *EconomicsData) MinGasPrice() uint64 { + return ed.minGasPrice +} + +// MinGasLimit will return minimum gas limit +func (ed *EconomicsData) MinGasLimit() uint64 { + return ed.minGasLimit +} + +// CommunityAddress will return community address +func (ed *EconomicsData) CommunityAddress() string { + return ed.communityAddress +} + +// BurnAddress will return burn address +func (ed *EconomicsData) BurnAddress() string { + return ed.burnAddress +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ed *EconomicsData) IsInterfaceNil() bool { + if ed == nil { + return true + } + return false +} diff --git a/process/economics/economicsData_test.go b/process/economics/economicsData_test.go new file mode 100644 index 00000000000..38d68a298c7 --- /dev/null +++ b/process/economics/economicsData_test.go @@ -0,0 +1,262 @@ +package economics_test + +import ( + "math/big" + "strconv" + "testing" + + "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/economics" + "github.com/stretchr/testify/assert" +) + +func createDummyEconomicsConfig() *config.ConfigEconomics { + return &config.ConfigEconomics{ + EconomicsAddresses: config.EconomicsAddresses{ + CommunityAddress: "addr1", + BurnAddress: "addr2", + }, + RewardsSettings: config.RewardsSettings{ + RewardsValue: "1000000000000000000000000000000000", + CommunityPercentage: 0.1, + LeaderPercentage: 0.1, + BurnPercentage: 0.8, + }, + FeeSettings: config.FeeSettings{ + MinGasPrice: "18446744073709551615", + MinGasLimit: "500", + }, + } +} + +func TestNewEconomicsData_InvalidRewardsValueShouldErr(t *testing.T) { + t.Parallel() + + economicsConfig := createDummyEconomicsConfig() + badRewardsValues := []string{ + "-1", + 
"-100000000000000000000", + "badValue", + "", + "#########", + "11112S", + "1111O0000", + "10ERD", + } + + for _, rewardsValue := range badRewardsValues { + economicsConfig.RewardsSettings.RewardsValue = rewardsValue + _, err := economics.NewEconomicsData(economicsConfig) + assert.Equal(t, process.ErrInvalidRewardsValue, err) + } +} + +func TestNewEconomicsData_InvalidMinGasPriceShouldErr(t *testing.T) { + t.Parallel() + + economicsConfig := createDummyEconomicsConfig() + badGasPrice := []string{ + "-1", + "-100000000000000000000", + "badValue", + "", + "#########", + "11112S", + "1111O0000", + "10ERD", + "10000000000000000000000000000000000000000000000000000000000000", + } + + for _, gasPrice := range badGasPrice { + economicsConfig.FeeSettings.MinGasPrice = gasPrice + _, err := economics.NewEconomicsData(economicsConfig) + assert.Equal(t, process.ErrInvalidMinimumGasPrice, err) + } + +} + +func TestNewEconomicsData_InvalidMinGasLimitShouldErr(t *testing.T) { + t.Parallel() + + economicsConfig := createDummyEconomicsConfig() + bagMinGasLimit := []string{ + "-1", + "-100000000000000000000", + "badValue", + "", + "#########", + "11112S", + "1111O0000", + "10ERD", + "10000000000000000000000000000000000000000000000000000000000000", + } + + for _, minGasLimit := range bagMinGasLimit { + economicsConfig.FeeSettings.MinGasLimit = minGasLimit + _, err := economics.NewEconomicsData(economicsConfig) + assert.Equal(t, process.ErrInvalidMinimumGasLimitForTx, err) + } + +} + +func TestNewEconomicsData_InvalidBurnPercentageShouldErr(t *testing.T) { + t.Parallel() + + economicsConfig := createDummyEconomicsConfig() + economicsConfig.RewardsSettings.BurnPercentage = -1.0 + economicsConfig.RewardsSettings.CommunityPercentage = 0.1 + economicsConfig.RewardsSettings.LeaderPercentage = 0.1 + + _, err := economics.NewEconomicsData(economicsConfig) + assert.Equal(t, process.ErrInvalidRewardsPercentages, err) + +} + +func TestNewEconomicsData_InvalidCommunityPercentageShouldErr(t *testing.T) { + t.Parallel() + + economicsConfig := createDummyEconomicsConfig() + economicsConfig.RewardsSettings.BurnPercentage = 0.1 + economicsConfig.RewardsSettings.CommunityPercentage = -0.1 + economicsConfig.RewardsSettings.LeaderPercentage = 0.1 + + _, err := economics.NewEconomicsData(economicsConfig) + assert.Equal(t, process.ErrInvalidRewardsPercentages, err) + +} + +func TestNewEconomicsData_InvalidLeaderPercentageShouldErr(t *testing.T) { + t.Parallel() + + economicsConfig := createDummyEconomicsConfig() + economicsConfig.RewardsSettings.BurnPercentage = 0.1 + economicsConfig.RewardsSettings.CommunityPercentage = 0.1 + economicsConfig.RewardsSettings.LeaderPercentage = -0.1 + + _, err := economics.NewEconomicsData(economicsConfig) + assert.Equal(t, process.ErrInvalidRewardsPercentages, err) + +} +func TestNewEconomicsData_InvalidRewardsPercentageSumShouldErr(t *testing.T) { + t.Parallel() + + economicsConfig := createDummyEconomicsConfig() + economicsConfig.RewardsSettings.BurnPercentage = 0.5 + economicsConfig.RewardsSettings.CommunityPercentage = 0.2 + economicsConfig.RewardsSettings.LeaderPercentage = 0.5 + + _, err := economics.NewEconomicsData(economicsConfig) + assert.Equal(t, process.ErrInvalidRewardsPercentages, err) + +} + +func TestNewEconomicsData_ShouldWork(t *testing.T) { + t.Parallel() + + economicsConfig := createDummyEconomicsConfig() + economicsData, _ := economics.NewEconomicsData(economicsConfig) + assert.NotNil(t, economicsData) +} + +func TestEconomicsData_RewardsValue(t *testing.T) { + t.Parallel() + 
+ rewardsValue := int64(100) + economicsConfig := createDummyEconomicsConfig() + economicsConfig.RewardsSettings.RewardsValue = strconv.FormatInt(rewardsValue, 10) + economicsData, _ := economics.NewEconomicsData(economicsConfig) + + value := economicsData.RewardsValue() + assert.Equal(t, big.NewInt(rewardsValue), value) +} + +func TestEconomicsData_CommunityPercentage(t *testing.T) { + t.Parallel() + + communityPercentage := 0.50 + economicsConfig := createDummyEconomicsConfig() + economicsConfig.RewardsSettings.CommunityPercentage = communityPercentage + economicsConfig.RewardsSettings.BurnPercentage = 0.2 + economicsConfig.RewardsSettings.LeaderPercentage = 0.3 + economicsData, _ := economics.NewEconomicsData(economicsConfig) + + value := economicsData.CommunityPercentage() + assert.Equal(t, communityPercentage, value) +} + +func TestEconomicsData_LeaderPercentage(t *testing.T) { + t.Parallel() + + leaderPercentage := 0.40 + economicsConfig := createDummyEconomicsConfig() + economicsConfig.RewardsSettings.CommunityPercentage = 0.30 + economicsConfig.RewardsSettings.BurnPercentage = 0.30 + economicsConfig.RewardsSettings.LeaderPercentage = leaderPercentage + economicsData, _ := economics.NewEconomicsData(economicsConfig) + + value := economicsData.LeaderPercentage() + assert.Equal(t, leaderPercentage, value) +} + +func TestEconomicsData_BurnPercentage(t *testing.T) { + t.Parallel() + + burnPercentage := 0.41 + economicsConfig := createDummyEconomicsConfig() + economicsConfig.RewardsSettings.BurnPercentage = burnPercentage + economicsConfig.RewardsSettings.CommunityPercentage = 0.29 + economicsConfig.RewardsSettings.LeaderPercentage = 0.3 + economicsData, _ := economics.NewEconomicsData(economicsConfig) + + value := economicsData.BurnPercentage() + assert.Equal(t, burnPercentage, value) +} + +func TestEconomicsData_MinGasPrice(t *testing.T) { + t.Parallel() + + minGasPrice := uint64(500) + economicsConfig := createDummyEconomicsConfig() + economicsConfig.FeeSettings.MinGasPrice = strconv.FormatUint(minGasPrice, 10) + economicsData, _ := economics.NewEconomicsData(economicsConfig) + + value := economicsData.MinGasPrice() + assert.Equal(t, minGasPrice, value) +} + +func TestEconomicsData_MinGasLimit(t *testing.T) { + t.Parallel() + + minGasLimit := uint64(1000) + economicsConfig := createDummyEconomicsConfig() + economicsConfig.FeeSettings.MinGasLimit = strconv.FormatUint(minGasLimit, 10) + economicsData, _ := economics.NewEconomicsData(economicsConfig) + + value := economicsData.MinGasLimit() + assert.Equal(t, minGasLimit, value) +} + +func TestEconomicsData_CommunityAddress(t *testing.T) { + t.Parallel() + + communityAddress := "addr1" + economicsConfig := createDummyEconomicsConfig() + economicsConfig.EconomicsAddresses.CommunityAddress = communityAddress + economicsData, _ := economics.NewEconomicsData(economicsConfig) + + value := economicsData.CommunityAddress() + assert.Equal(t, communityAddress, value) +} + +func TestEconomicsData_BurnAddress(t *testing.T) { + t.Parallel() + + burnAddress := "addr2" + economicsConfig := createDummyEconomicsConfig() + economicsConfig.EconomicsAddresses.BurnAddress = burnAddress + economicsData, _ := economics.NewEconomicsData(economicsConfig) + + value := economicsData.BurnAddress() + assert.Equal(t, burnAddress, value) +} diff --git a/process/errors.go b/process/errors.go index c3ed64a48d2..7cca2818df1 100644 --- a/process/errors.go +++ b/process/errors.go @@ -91,12 +91,9 @@ var ErrNilRootHash = errors.New("root hash is nil") // ErrWrongNonceInBlock 
signals the nonce in block is different than expected nonce var ErrWrongNonceInBlock = errors.New("wrong nonce in block") -// ErrBlockHashDoesNotMatch signals the hash of the block is not matching with the previous one +// ErrBlockHashDoesNotMatch signals that the header hash does not match the previous one var ErrBlockHashDoesNotMatch = errors.New("block hash does not match") -// ErrHashDoesNotMatchInOtherChainBlock signals that header hash for one shard is not match with the previous one -var ErrHashDoesNotMatchInOtherChainBlock = errors.New("block hash does not match with the last committed for this shard") - // ErrMissingTransaction signals that one transaction is missing var ErrMissingTransaction = errors.New("missing transaction") @@ -106,8 +103,8 @@ var ErrMarshalWithoutSuccess = errors.New("marshal without success") // ErrUnmarshalWithoutSuccess signals that unmarshal some data was not done with success var ErrUnmarshalWithoutSuccess = errors.New("unmarshal without success") -// ErrRootStateMissmatch signals that persist some data was not done with success -var ErrRootStateMissmatch = errors.New("root state does not match") +// ErrRootStateDoesNotMatch signals that the root state does not match +var ErrRootStateDoesNotMatch = errors.New("root state does not match") // ErrAccountStateDirty signals that the accounts were modified before starting the current modification var ErrAccountStateDirty = errors.New("accountState was dirty before starting to change") @@ -139,6 +136,9 @@ var ErrNilMessenger = errors.New("nil Messenger") // ErrNilTxDataPool signals that a nil transaction pool has been provided var ErrNilTxDataPool = errors.New("nil transaction data pool") +// ErrEmptyTxDataPool signals that an empty transaction pool has been provided +var ErrEmptyTxDataPool = errors.New("empty transaction data pool") + // ErrNilHeadersDataPool signals that a nil headers pool has been provided var ErrNilHeadersDataPool = errors.New("nil headers data pool") @@ -169,6 +169,9 @@ var ErrNegativeValue = errors.New("negative value") // ErrNilShardCoordinator signals that an operation has been attempted to or with a nil shard coordinator var ErrNilShardCoordinator = errors.New("nil shard coordinator") +// ErrNilNodesCoordinator signals that an operation has been attempted to or with a nil nodes coordinator +var ErrNilNodesCoordinator = errors.New("nil nodes coordinator") + // ErrInvalidRcvAddr signals that an operation has been attempted to or with an invalid receiver address var ErrInvalidRcvAddr = errors.New("invalid receiver address") @@ -181,6 +184,9 @@ var ErrNilKeyGen = errors.New("nil key generator") // ErrNilSingleSigner signals that a nil single signer is used var ErrNilSingleSigner = errors.New("nil single signer") +// ErrBlockProposerSignatureMissing signals that the block proposer signature is missing from the block aggregated sig +var ErrBlockProposerSignatureMissing = errors.New("block proposer signature is missing") + // ErrNilMultiSigVerifier signals that a nil multi-signature verifier is used var ErrNilMultiSigVerifier = errors.New("nil multi-signature verifier") @@ -202,9 +208,6 @@ var ErrNilStorage = errors.New("nil storage") // ErrNilShardedDataCacherNotifier signals that a nil sharded data cacher notifier has been provided var ErrNilShardedDataCacherNotifier = errors.New("nil sharded data cacher notifier") -// ErrNilBlocksTracker signals that a nil blocks tracker has been provided -var ErrNilBlocksTracker = errors.New("nil blocks tracker") - // ErrInvalidTxInPool signals an invalid 
transaction in the transactions pool var ErrInvalidTxInPool = errors.New("invalid transaction in the transactions pool") @@ -229,6 +232,9 @@ var ErrNilMiniBlockPool = errors.New("nil mini block pool") // ErrNilMetaBlockPool signals that a nil meta blocks pool was used var ErrNilMetaBlockPool = errors.New("nil meta block pool") +// ErrNilShardBlockPool signals that a nil shard blocks pool was used +var ErrNilShardBlockPool = errors.New("nil shard block pool") + // ErrNilTxProcessor signals that a nil transactions processor was used var ErrNilTxProcessor = errors.New("nil transactions processor") @@ -256,6 +262,9 @@ var ErrNilResolverContainer = errors.New("nil resolver container") // ErrNilRequestHandler signals that a nil request handler interface was provided var ErrNilRequestHandler = errors.New("nil request handler") +// ErrNilInternalTransactionProducer signals that a nil internal transaction producer was provided +var ErrNilInternalTransactionProducer = errors.New("nil internal transaction producer") + // ErrNilHaveTimeHandler signals that a nil have time handler func was provided var ErrNilHaveTimeHandler = errors.New("nil have time handler") @@ -280,9 +289,6 @@ var ErrNoTransactionInMessage = errors.New("no transaction found in received mes // ErrNilBuffer signals that a provided byte buffer is nil var ErrNilBuffer = errors.New("provided byte buffer is nil") -// ErrNilChronologyValidator signals that a nil chronology validator has been provided -var ErrNilChronologyValidator = errors.New("provided chronology validator object is nil") - // ErrNilRandSeed signals that a nil rand seed has been provided var ErrNilRandSeed = errors.New("provided rand seed is nil") @@ -292,14 +298,11 @@ var ErrNilPrevRandSeed = errors.New("provided previous rand seed is nil") // ErrNilRequestHeaderHandlerByNonce signals that a nil header request handler by nonce func was provided var ErrNilRequestHeaderHandlerByNonce = errors.New("nil request header handler by nonce") -// ErrLowerRoundInOtherChainBlock signals that header round for one shard is too low for processing it -var ErrLowerRoundInOtherChainBlock = errors.New("header round is lower than last committed for this shard") - -// ErrLowerRoundInBlock signals that a header round is too low for processing +// ErrLowerRoundInBlock signals that a header round is too low for processing it var ErrLowerRoundInBlock = errors.New("header round is lower than last committed") -// ErrRandSeedMismatch signals that random seeds are not equal -var ErrRandSeedMismatch = errors.New("random seeds do not match") +// ErrRandSeedDoesNotMatch signals that the random seed does not match the previous one +var ErrRandSeedDoesNotMatch = errors.New("random seed does not match") // ErrHeaderNotFinal signals that header is not final and it should be var ErrHeaderNotFinal = errors.New("header in metablock is not final") @@ -352,6 +355,9 @@ var ErrNilVMOutput = errors.New("nil vm output") // ErrNilBalanceFromSC signals that balance is nil var ErrNilBalanceFromSC = errors.New("output balance from VM is nil") +// ErrNilValueFromRewardTransaction signals that the transferred value is nil +var ErrNilValueFromRewardTransaction = errors.New("transferred value is nil in reward transaction") + // ErrNilTemporaryAccountsHandler signals that temporary accounts handler is nil var ErrNilTemporaryAccountsHandler = errors.New("temporary accounts handler is nil") @@ -361,18 +367,33 @@ var ErrNotEnoughValidBlocksInStorage = errors.New("not enough valid blocks in st // ErrNilSmartContractResult 
signals that the smart contract result is nil var ErrNilSmartContractResult = errors.New("smart contract result is nil") +// ErrNilRewardTransaction signals that the reward transaction is nil +var ErrNilRewardTransaction = errors.New("reward transaction is nil") + +// ErrRewardTransactionNotFound is raised when a reward transaction should be present but was not found +var ErrRewardTransactionNotFound = errors.New("reward transaction not found") + // ErrInvalidDataInput signals that the data input is invalid for parsing var ErrInvalidDataInput = errors.New("data input is invalid to create key, value storage output") // ErrNoUnsignedTransactionInMessage signals that message does not contain required data var ErrNoUnsignedTransactionInMessage = errors.New("no unsigned transactions in message") +// ErrNoRewardTransactionInMessage signals that message does not contain required data +var ErrNoRewardTransactionInMessage = errors.New("no reward transactions in message") + // ErrNilUTxDataPool signals that unsigned transaction pool is nil var ErrNilUTxDataPool = errors.New("unsigned transactions pool is nil") +// ErrNilRewardTxDataPool signals that the reward transactions pool is nil +var ErrNilRewardTxDataPool = errors.New("reward transactions pool is nil") + // ErrNilUTxStorage signals that unsigned transaction storage is nil var ErrNilUTxStorage = errors.New("unsigned transactions storage is nil") +// ErrNilRewardsTxStorage signals that the reward transactions storage is nil +var ErrNilRewardsTxStorage = errors.New("reward transactions storage is nil") + // ErrNilScAddress signals that a nil smart contract address has been provided var ErrNilScAddress = errors.New("nil SC address") @@ -397,6 +418,9 @@ var ErrNilUint64Converter = errors.New("unit64converter is nil") // ErrNilSmartContractResultProcessor signals that smart contract result processor is nil var ErrNilSmartContractResultProcessor = errors.New("nil smart contract result processor") +// ErrNilRewardsTxProcessor signals that the rewards transaction processor is nil +var ErrNilRewardsTxProcessor = errors.New("nil rewards transaction processor") + // ErrNilIntermediateProcessorContainer signals that intermediate processors container is nil var ErrNilIntermediateProcessorContainer = errors.New("intermediate processor container is nil") @@ -418,6 +442,27 @@ var ErrNilHeaderHandlerValidator = errors.New("nil header handler validator prov // ErrNilAppStatusHandler defines the error for setting a nil AppStatusHandler var ErrNilAppStatusHandler = errors.New("nil AppStatusHandler") +// ErrNotEnoughFeeInTransactions signals that the transaction does not have enough fee +var ErrNotEnoughFeeInTransactions = errors.New("transaction fee is not enough") + +// ErrNilUnsignedTxHandler signals that the unsigned tx handler is nil +var ErrNilUnsignedTxHandler = errors.New("nil unsigned tx handler") + +// ErrRewardTxsDoNotMatch signals that reward txs do not match +var ErrRewardTxsDoNotMatch = errors.New("calculated reward tx with block reward tx does not match") + +// ErrRewardTxNotFound signals that the reward transaction was not found +var ErrRewardTxNotFound = errors.New("reward transaction not found") + +// ErrRewardTxsMismatchCreatedReceived signals a mismatch between the number of created and received reward transactions +var ErrRewardTxsMismatchCreatedReceived = errors.New("mismatch between created and received reward transactions") + +// ErrNilTxTypeHandler signals that tx type handler is nil +var ErrNilTxTypeHandler = errors.New("nil tx type handler") + +// 
ErrNilSpecialAddressHandler signals that special address handler is nil +var ErrNilSpecialAddressHandler = errors.New("nil special address handler") + // ErrNotEnoughArgumentsToDeploy signals that there are not enough arguments to deploy the smart contract var ErrNotEnoughArgumentsToDeploy = errors.New("not enough arguments to deploy the smart contract") @@ -426,3 +471,39 @@ var ErrVMTypeLengthInvalid = errors.New("vm type length is too long") // ErrOverallBalanceChangeFromSC signals that all sumed balance changes are not zero var ErrOverallBalanceChangeFromSC = errors.New("SC output balance updates are wrong") + +// ErrNilTxsPoolsCleaner signals that a nil transactions pools cleaner has been provided +var ErrNilTxsPoolsCleaner = errors.New("nil transactions pools cleaner") + +// ErrZeroMaxCleanTime signals that the cleaning time for pools is less than or equal to 0 +var ErrZeroMaxCleanTime = errors.New("cleaning time is equal or less than zero") + +// ErrNilEconomicsRewardsHandler signals that rewards handler is nil +var ErrNilEconomicsRewardsHandler = errors.New("nil economics rewards handler") + +// ErrNilEconomicsFeeHandler signals that fee handler is nil +var ErrNilEconomicsFeeHandler = errors.New("nil economics fee handler") + +// ErrNilThrottler signals that a nil throttler has been provided +var ErrNilThrottler = errors.New("nil throttler") + +// ErrSystemBusy signals that the system is busy +var ErrSystemBusy = errors.New("system busy") + +// ErrInsufficientGasPriceInTx signals that a lower gas price than required was provided +var ErrInsufficientGasPriceInTx = errors.New("insufficient gas price in tx") + +// ErrInsufficientGasLimitInTx signals that a lower gas limit than required was provided +var ErrInsufficientGasLimitInTx = errors.New("insufficient gas limit in tx") + +// ErrInvalidMinimumGasPrice signals that an invalid gas price has been read from config file +var ErrInvalidMinimumGasPrice = errors.New("invalid minimum gas price") + +// ErrInvalidMinimumGasLimitForTx signals that an invalid minimum gas limit for transactions has been read from config file +var ErrInvalidMinimumGasLimitForTx = errors.New("invalid minimum gas limit for transactions") + +// ErrInvalidRewardsValue signals that an invalid rewards value has been read from config file +var ErrInvalidRewardsValue = errors.New("invalid rewards value") + +// ErrInvalidRewardsPercentages signals that the rewards percentages are not correct +var ErrInvalidRewardsPercentages = errors.New("invalid rewards percentages") diff --git a/process/factory/factory.go b/process/factory/factory.go index d65c656aca7..5a5fa359840 100644 --- a/process/factory/factory.go +++ b/process/factory/factory.go @@ -5,6 +5,8 @@ const ( TransactionTopic = "transactions" // UnsignedTransactionTopic is the topic used for sharing unsigned transactions UnsignedTransactionTopic = "unsignedTransactions" + // RewardsTransactionTopic is the topic used for sharing reward transactions + RewardsTransactionTopic = "rewardsTransactions" // HeadersTopic is the topic used for sharing block headers HeadersTopic = "headers" // MiniBlocksTopic is the topic used for sharing mini blocks diff --git a/process/factory/metachain/interceptorsContainerFactory.go b/process/factory/metachain/interceptorsContainerFactory.go index 80338b1ddd4..b07dd0f535d 100644 --- a/process/factory/metachain/interceptorsContainerFactory.go +++ b/process/factory/metachain/interceptorsContainerFactory.go @@ -15,32 +15,35 @@ import ( ) type interceptorsContainerFactory struct { - marshalizer 
marshal.Marshalizer - hasher hashing.Hasher - store dataRetriever.StorageService - dataPool dataRetriever.MetaPoolsHolder - shardCoordinator sharding.Coordinator - messenger process.TopicHandler - multiSigner crypto.MultiSigner - chronologyValidator process.ChronologyValidator - tpsBenchmark *statistics.TpsBenchmark + marshalizer marshal.Marshalizer + hasher hashing.Hasher + store dataRetriever.StorageService + dataPool dataRetriever.MetaPoolsHolder + shardCoordinator sharding.Coordinator + nodesCoordinator sharding.NodesCoordinator + messenger process.TopicHandler + multiSigner crypto.MultiSigner + tpsBenchmark *statistics.TpsBenchmark } // NewInterceptorsContainerFactory is responsible for creating a new interceptors factory object func NewInterceptorsContainerFactory( shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, messenger process.TopicHandler, store dataRetriever.StorageService, marshalizer marshal.Marshalizer, hasher hashing.Hasher, multiSigner crypto.MultiSigner, dataPool dataRetriever.MetaPoolsHolder, - chronologyValidator process.ChronologyValidator, ) (*interceptorsContainerFactory, error) { if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { return nil, process.ErrNilShardCoordinator } + if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { + return nil, process.ErrNilNodesCoordinator + } if messenger == nil { return nil, process.ErrNilMessenger } @@ -59,19 +62,16 @@ func NewInterceptorsContainerFactory( if dataPool == nil || dataPool.IsInterfaceNil() { return nil, process.ErrNilDataPoolHolder } - if chronologyValidator == nil || chronologyValidator.IsInterfaceNil() { - return nil, process.ErrNilChronologyValidator - } return &interceptorsContainerFactory{ - shardCoordinator: shardCoordinator, - messenger: messenger, - store: store, - marshalizer: marshalizer, - hasher: hasher, - multiSigner: multiSigner, - dataPool: dataPool, - chronologyValidator: chronologyValidator, + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + messenger: messenger, + store: store, + marshalizer: marshalizer, + hasher: hasher, + multiSigner: multiSigner, + dataPool: dataPool, }, nil } @@ -134,7 +134,7 @@ func (icf *interceptorsContainerFactory) generateMetablockInterceptor() ([]strin icf.multiSigner, icf.hasher, icf.shardCoordinator, - icf.chronologyValidator, + icf.nodesCoordinator, ) if err != nil { return nil, nil, err @@ -187,7 +187,7 @@ func (icf *interceptorsContainerFactory) createOneShardHeaderInterceptor(identif icf.multiSigner, icf.hasher, icf.shardCoordinator, - icf.chronologyValidator, + icf.nodesCoordinator, ) if err != nil { return nil, err diff --git a/process/factory/metachain/interceptorsContainerFactory_test.go b/process/factory/metachain/interceptorsContainerFactory_test.go index b08e783cd8e..6cca5fc63f8 100644 --- a/process/factory/metachain/interceptorsContainerFactory_test.go +++ b/process/factory/metachain/interceptorsContainerFactory_test.go @@ -78,31 +78,49 @@ func TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing icf, err := metachain.NewInterceptorsContainerFactory( nil, + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) assert.Equal(t, process.ErrNilShardCoordinator, err) } +func TestNewInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + icf, err := 
metachain.NewInterceptorsContainerFactory( + mock.NewOneShardCoordinatorMock(), + nil, + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + mock.NewMultiSigner(), + createDataPools(), + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilNodesCoordinator, err) +} + func TestNewInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) { t.Parallel() icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), nil, createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -114,13 +132,13 @@ func TestNewInterceptorsContainerFactory_NilBlockchainShouldErr(t *testing.T) { icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, nil, &mock.MarshalizerMock{}, &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -132,13 +150,13 @@ func TestNewInterceptorsContainerFactory_NilMarshalizerShouldErr(t *testing.T) { icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), nil, &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -150,13 +168,13 @@ func TestNewInterceptorsContainerFactory_NilHasherShouldErr(t *testing.T) { icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, nil, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -168,13 +186,13 @@ func TestNewInterceptorsContainerFactory_NilMultiSignerShouldErr(t *testing.T) { icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, nil, createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -186,13 +204,13 @@ func TestNewInterceptorsContainerFactory_NilDataPoolShouldErr(t *testing.T) { icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, mock.NewMultiSigner(), nil, - &mock.ChronologyValidatorStub{}, ) assert.Nil(t, icf) @@ -204,13 +222,13 @@ func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { icf, err := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) assert.NotNil(t, icf) @@ -224,13 +242,13 @@ func TestInterceptorsContainerFactory_CreateTopicMetablocksFailsShouldErr(t *tes icf, _ := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.MetachainBlocksTopic, ""), createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -244,13 +262,13 @@ func 
TestInterceptorsContainerFactory_CreateTopicShardHeadersForMetachainFailsSh icf, _ := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.ShardHeadersForMetachainTopic, ""), createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -264,13 +282,13 @@ func TestInterceptorsContainerFactory_CreateRegisterForMetablocksFailsShouldErr( icf, _ := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.MetachainBlocksTopic), createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -284,13 +302,13 @@ func TestInterceptorsContainerFactory_CreateRegisterShardHeadersForMetachainFail icf, _ := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.ShardHeadersForMetachainTopic), createStore(), &mock.MarshalizerMock{}, &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -304,6 +322,7 @@ func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { icf, _ := metachain.NewInterceptorsContainerFactory( mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{ CreateTopicCalled: func(name string, createChannelForTopic bool) error { return nil @@ -317,7 +336,6 @@ func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) container, err := icf.Create() @@ -335,8 +353,16 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { shardCoordinator.SetNoShards(uint32(noOfShards)) shardCoordinator.CurrentShard = 1 + nodesCoordinator := &mock.NodesCoordinatorMock{ + ShardConsensusSize: 1, + MetaConsensusSize: 1, + NbShards: uint32(noOfShards), + ShardId: 1, + } + icf, _ := metachain.NewInterceptorsContainerFactory( shardCoordinator, + nodesCoordinator, &mock.TopicHandlerStub{ CreateTopicCalled: func(name string, createChannelForTopic bool) error { return nil @@ -350,7 +376,6 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { &mock.HasherMock{}, mock.NewMultiSigner(), createDataPools(), - &mock.ChronologyValidatorStub{}, ) container, _ := icf.Create() diff --git a/process/factory/shard/interceptorsContainerFactory.go b/process/factory/shard/interceptorsContainerFactory.go index 302eed9633b..1a905cad128 100644 --- a/process/factory/shard/interceptorsContainerFactory.go +++ b/process/factory/shard/interceptorsContainerFactory.go @@ -1,6 +1,7 @@ package shard import ( + "github.com/ElrondNetwork/elrond-go/core/throttler" "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/dataRetriever" @@ -11,30 +12,37 @@ import ( "github.com/ElrondNetwork/elrond-go/process/dataValidators" "github.com/ElrondNetwork/elrond-go/process/factory" "github.com/ElrondNetwork/elrond-go/process/factory/containers" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/ElrondNetwork/elrond-go/process/unsigned" 
"github.com/ElrondNetwork/elrond-go/sharding" ) +const maxGoRoutineTxInterceptor = 100 + type interceptorsContainerFactory struct { - accounts state.AccountsAdapter - shardCoordinator sharding.Coordinator - messenger process.TopicHandler - store dataRetriever.StorageService - marshalizer marshal.Marshalizer - hasher hashing.Hasher - keyGen crypto.KeyGenerator - singleSigner crypto.SingleSigner - multiSigner crypto.MultiSigner - dataPool dataRetriever.PoolsHolder - addrConverter state.AddressConverter - chronologyValidator process.ChronologyValidator + accounts state.AccountsAdapter + shardCoordinator sharding.Coordinator + messenger process.TopicHandler + store dataRetriever.StorageService + marshalizer marshal.Marshalizer + hasher hashing.Hasher + keyGen crypto.KeyGenerator + singleSigner crypto.SingleSigner + multiSigner crypto.MultiSigner + dataPool dataRetriever.PoolsHolder + addrConverter state.AddressConverter + nodesCoordinator sharding.NodesCoordinator + txInterceptorThrottler process.InterceptorThrottler + maxTxNonceDeltaAllowed int + txFeeHandler process.FeeHandler } // NewInterceptorsContainerFactory is responsible for creating a new interceptors factory object func NewInterceptorsContainerFactory( accounts state.AccountsAdapter, shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, messenger process.TopicHandler, store dataRetriever.StorageService, marshalizer marshal.Marshalizer, @@ -44,7 +52,8 @@ func NewInterceptorsContainerFactory( multiSigner crypto.MultiSigner, dataPool dataRetriever.PoolsHolder, addrConverter state.AddressConverter, - chronologyValidator process.ChronologyValidator, + maxTxNonceDeltaAllowed int, + txFeeHandler process.FeeHandler, ) (*interceptorsContainerFactory, error) { if accounts == nil || accounts.IsInterfaceNil() { return nil, process.ErrNilAccountsAdapter @@ -79,23 +88,34 @@ func NewInterceptorsContainerFactory( if addrConverter == nil || addrConverter.IsInterfaceNil() { return nil, process.ErrNilAddressConverter } - if chronologyValidator == nil || chronologyValidator.IsInterfaceNil() { - return nil, process.ErrNilChronologyValidator + if nodesCoordinator == nil || nodesCoordinator.IsInterfaceNil() { + return nil, process.ErrNilNodesCoordinator + } + if txFeeHandler == nil || txFeeHandler.IsInterfaceNil() { + return nil, process.ErrNilEconomicsFeeHandler + } + + txInterceptorThrottler, err := throttler.NewNumGoRoutineThrottler(maxGoRoutineTxInterceptor) + if err != nil { + return nil, err } return &interceptorsContainerFactory{ - accounts: accounts, - shardCoordinator: shardCoordinator, - messenger: messenger, - store: store, - marshalizer: marshalizer, - hasher: hasher, - keyGen: keyGen, - singleSigner: singleSigner, - multiSigner: multiSigner, - dataPool: dataPool, - addrConverter: addrConverter, - chronologyValidator: chronologyValidator, + accounts: accounts, + shardCoordinator: shardCoordinator, + nodesCoordinator: nodesCoordinator, + messenger: messenger, + store: store, + marshalizer: marshalizer, + hasher: hasher, + keyGen: keyGen, + singleSigner: singleSigner, + multiSigner: multiSigner, + dataPool: dataPool, + addrConverter: addrConverter, + txInterceptorThrottler: txInterceptorThrottler, + maxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, + txFeeHandler: txFeeHandler, }, nil } @@ -123,6 +143,16 @@ func (icf *interceptorsContainerFactory) Create() (process.InterceptorsContainer return nil, err } + keys, interceptorSlice, err = icf.generateRewardTxInterceptors() + if err != nil { + return nil, err + } + + err 
= container.AddMultiple(keys, interceptorSlice) + if err != nil { + return nil, err + } + keys, interceptorSlice, err = icf.generateHdrInterceptor() if err != nil { return nil, err @@ -216,7 +246,7 @@ func (icf *interceptorsContainerFactory) generateTxInterceptors() ([]string, []p } func (icf *interceptorsContainerFactory) createOneTxInterceptor(identifier string) (process.Interceptor, error) { - txValidator, err := dataValidators.NewTxValidator(icf.accounts, icf.shardCoordinator) + txValidator, err := dataValidators.NewTxValidator(icf.accounts, icf.shardCoordinator, icf.maxTxNonceDeltaAllowed) if err != nil { return nil, err } @@ -229,7 +259,64 @@ func (icf *interceptorsContainerFactory) createOneTxInterceptor(identifier strin icf.hasher, icf.singleSigner, icf.keyGen, - icf.shardCoordinator) + icf.shardCoordinator, + icf.txInterceptorThrottler, + icf.txFeeHandler, + ) + + if err != nil { + return nil, err + } + + return icf.createTopicAndAssignHandler(identifier, interceptor, true) +} + +//------- Reward transactions interceptors + +func (icf *interceptorsContainerFactory) generateRewardTxInterceptors() ([]string, []process.Interceptor, error) { + shardC := icf.shardCoordinator + + noOfShards := shardC.NumberOfShards() + + keys := make([]string, noOfShards) + interceptorSlice := make([]process.Interceptor, noOfShards) + + for idx := uint32(0); idx < noOfShards; idx++ { + identifierScr := factory.RewardsTransactionTopic + shardC.CommunicationIdentifier(idx) + + interceptor, err := icf.createOneRewardTxInterceptor(identifierScr) + if err != nil { + return nil, nil, err + } + + keys[int(idx)] = identifierScr + interceptorSlice[int(idx)] = interceptor + } + + identifierTx := factory.RewardsTransactionTopic + shardC.CommunicationIdentifier(sharding.MetachainShardId) + + interceptor, err := icf.createOneRewardTxInterceptor(identifierTx) + if err != nil { + return nil, nil, err + } + + keys = append(keys, identifierTx) + interceptorSlice = append(interceptorSlice, interceptor) + + return keys, interceptorSlice, nil +} + +func (icf *interceptorsContainerFactory) createOneRewardTxInterceptor(identifier string) (process.Interceptor, error) { + rewardTxStorer := icf.store.GetStorer(dataRetriever.RewardTransactionUnit) + + interceptor, err := rewardTransaction.NewRewardTxInterceptor( + icf.marshalizer, + icf.dataPool.RewardTransactions(), + rewardTxStorer, + icf.addrConverter, + icf.hasher, + icf.shardCoordinator, + ) if err != nil { return nil, err @@ -311,7 +398,7 @@ func (icf *interceptorsContainerFactory) generateHdrInterceptor() ([]string, []p icf.multiSigner, icf.hasher, icf.shardCoordinator, - icf.chronologyValidator, + icf.nodesCoordinator, ) if err != nil { return nil, nil, err @@ -411,7 +498,7 @@ func (icf *interceptorsContainerFactory) generateMetachainHeaderInterceptor() ([ icf.multiSigner, icf.hasher, icf.shardCoordinator, - icf.chronologyValidator, + icf.nodesCoordinator, ) if err != nil { return nil, nil, err diff --git a/process/factory/shard/interceptorsContainerFactory_test.go b/process/factory/shard/interceptorsContainerFactory_test.go index 19efae2c75d..c76c31e3ce6 100644 --- a/process/factory/shard/interceptorsContainerFactory_test.go +++ b/process/factory/shard/interceptorsContainerFactory_test.go @@ -17,6 +17,8 @@ import ( var errExpected = errors.New("expected error") +const maxTxNonceDeltaAllowed = 100 + func createStubTopicHandler(matchStrToErrOnCreate string, matchStrToErrOnRegister string) process.TopicHandler { return &mock.TopicHandlerStub{ CreateTopicCalled: func(name 
string, createChannelForTopic bool) error { @@ -65,6 +67,9 @@ func createDataPools() dataRetriever.PoolsHolder { pools.UnsignedTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { return &mock.ShardedDataStub{} } + pools.RewardTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{} + } return pools } @@ -83,6 +88,7 @@ func TestNewInterceptorsContainerFactory_NilAccountsAdapter(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( nil, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -92,7 +98,8 @@ func TestNewInterceptorsContainerFactory_NilAccountsAdapter(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -105,6 +112,7 @@ func TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, nil, + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -114,19 +122,45 @@ func TestNewInterceptorsContainerFactory_NilShardCoordinatorShouldErr(t *testing mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) assert.Equal(t, process.ErrNilShardCoordinator, err) } +func TestNewInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + nil, + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilNodesCoordinator, err) +} + func TestNewInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) { t.Parallel() icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), nil, createStore(), &mock.MarshalizerMock{}, @@ -136,7 +170,8 @@ func TestNewInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -149,6 +184,7 @@ func TestNewInterceptorsContainerFactory_NilBlockchainShouldErr(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, nil, &mock.MarshalizerMock{}, @@ -158,7 +194,8 @@ func TestNewInterceptorsContainerFactory_NilBlockchainShouldErr(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -171,6 +208,7 @@ func TestNewInterceptorsContainerFactory_NilMarshalizerShouldErr(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, 
createStore(), nil, @@ -180,7 +218,8 @@ func TestNewInterceptorsContainerFactory_NilMarshalizerShouldErr(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -193,6 +232,7 @@ func TestNewInterceptorsContainerFactory_NilHasherShouldErr(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -202,7 +242,8 @@ func TestNewInterceptorsContainerFactory_NilHasherShouldErr(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -215,6 +256,7 @@ func TestNewInterceptorsContainerFactory_NilKeyGenShouldErr(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -224,7 +266,8 @@ func TestNewInterceptorsContainerFactory_NilKeyGenShouldErr(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -237,6 +280,7 @@ func TestNewInterceptorsContainerFactory_NilSingleSignerShouldErr(t *testing.T) icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -246,7 +290,8 @@ func TestNewInterceptorsContainerFactory_NilSingleSignerShouldErr(t *testing.T) mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -259,6 +304,7 @@ func TestNewInterceptorsContainerFactory_NilMultiSignerShouldErr(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -268,7 +314,8 @@ func TestNewInterceptorsContainerFactory_NilMultiSignerShouldErr(t *testing.T) { nil, createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -281,6 +328,7 @@ func TestNewInterceptorsContainerFactory_NilDataPoolShouldErr(t *testing.T) { icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -290,7 +338,8 @@ func TestNewInterceptorsContainerFactory_NilDataPoolShouldErr(t *testing.T) { mock.NewMultiSigner(), nil, &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) @@ -303,6 +352,7 @@ func TestNewInterceptorsContainerFactory_NilAddrConverterShouldErr(t *testing.T) icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -312,19 +362,45 @@ func 
TestNewInterceptorsContainerFactory_NilAddrConverterShouldErr(t *testing.T) mock.NewMultiSigner(), createDataPools(), nil, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.Nil(t, icf) assert.Equal(t, process.ErrNilAddressConverter, err) } +func TestNewInterceptorsContainerFactory_NilTxFeeHandlerShouldErr(t *testing.T) { + t.Parallel() + + icf, err := shard.NewInterceptorsContainerFactory( + &mock.AccountsStub{}, + mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), + &mock.TopicHandlerStub{}, + createStore(), + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + mock.NewMultiSigner(), + createDataPools(), + &mock.AddressConverterMock{}, + maxTxNonceDeltaAllowed, + nil, + ) + + assert.Nil(t, icf) + assert.Equal(t, process.ErrNilEconomicsFeeHandler, err) +} + func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { t.Parallel() icf, err := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{}, createStore(), &mock.MarshalizerMock{}, @@ -334,7 +410,8 @@ func TestNewInterceptorsContainerFactory_ShouldWork(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) assert.NotNil(t, icf) @@ -349,6 +426,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationTxFailsShouldErr(t *tes icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.TransactionTopic, ""), createStore(), &mock.MarshalizerMock{}, @@ -358,7 +436,8 @@ func TestInterceptorsContainerFactory_CreateTopicCreationTxFailsShouldErr(t *tes mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -373,6 +452,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationHdrFailsShouldErr(t *te icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.HeadersTopic, ""), createStore(), &mock.MarshalizerMock{}, @@ -382,7 +462,8 @@ func TestInterceptorsContainerFactory_CreateTopicCreationHdrFailsShouldErr(t *te mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -397,6 +478,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMiniBlocksFailsShouldEr icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.MiniBlocksTopic, ""), createStore(), &mock.MarshalizerMock{}, @@ -406,7 +488,8 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMiniBlocksFailsShouldEr mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -421,6 +504,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationPeerChBlocksFailsShould icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + 
mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.PeerChBodyTopic, ""), createStore(), &mock.MarshalizerMock{}, @@ -430,7 +514,8 @@ func TestInterceptorsContainerFactory_CreateTopicCreationPeerChBlocksFailsShould mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -445,6 +530,7 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMetachainHeadersFailsSh icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler(factory.MetachainBlocksTopic, ""), createStore(), &mock.MarshalizerMock{}, @@ -454,7 +540,8 @@ func TestInterceptorsContainerFactory_CreateTopicCreationMetachainHeadersFailsSh mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -469,6 +556,7 @@ func TestInterceptorsContainerFactory_CreateRegisterTxFailsShouldErr(t *testing. icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.TransactionTopic), createStore(), &mock.MarshalizerMock{}, @@ -478,7 +566,8 @@ func TestInterceptorsContainerFactory_CreateRegisterTxFailsShouldErr(t *testing. mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -493,6 +582,7 @@ func TestInterceptorsContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.HeadersTopic), createStore(), &mock.MarshalizerMock{}, @@ -502,7 +592,8 @@ func TestInterceptorsContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -517,6 +608,7 @@ func TestInterceptorsContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t * icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.MiniBlocksTopic), createStore(), &mock.MarshalizerMock{}, @@ -526,7 +618,8 @@ func TestInterceptorsContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t * mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -541,6 +634,7 @@ func TestInterceptorsContainerFactory_CreateRegisterPeerChBlocksFailsShouldErr(t icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.PeerChBodyTopic), createStore(), &mock.MarshalizerMock{}, @@ -550,7 +644,8 @@ func TestInterceptorsContainerFactory_CreateRegisterPeerChBlocksFailsShouldErr(t mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := 
icf.Create() @@ -565,6 +660,7 @@ func TestInterceptorsContainerFactory_CreateRegisterMetachainHeadersShouldErr(t icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), createStubTopicHandler("", factory.MetachainBlocksTopic), createStore(), &mock.MarshalizerMock{}, @@ -574,7 +670,8 @@ func TestInterceptorsContainerFactory_CreateRegisterMetachainHeadersShouldErr(t mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -589,6 +686,7 @@ func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, mock.NewOneShardCoordinatorMock(), + mock.NewNodesCoordinatorMock(), &mock.TopicHandlerStub{ CreateTopicCalled: func(name string, createChannelForTopic bool) error { return nil @@ -605,7 +703,8 @@ func TestInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, err := icf.Create() @@ -623,9 +722,17 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { shardCoordinator.SetNoShards(uint32(noOfShards)) shardCoordinator.CurrentShard = 1 + nodesCoordinator := &mock.NodesCoordinatorMock{ + ShardId: 1, + ShardConsensusSize: 1, + MetaConsensusSize: 1, + NbShards: uint32(noOfShards), + } + icf, _ := shard.NewInterceptorsContainerFactory( &mock.AccountsStub{}, shardCoordinator, + nodesCoordinator, &mock.TopicHandlerStub{ CreateTopicCalled: func(name string, createChannelForTopic bool) error { return nil @@ -642,18 +749,22 @@ func TestInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { mock.NewMultiSigner(), createDataPools(), &mock.AddressConverterMock{}, - &mock.ChronologyValidatorStub{}, + maxTxNonceDeltaAllowed, + &mock.FeeHandlerStub{}, ) container, _ := icf.Create() numInterceptorTxs := noOfShards + 1 + numInterceptorsUnsignedTxs := numInterceptorTxs + numInterceptorsRewardTxs := numInterceptorTxs numInterceptorHeaders := 1 numInterceptorMiniBlocks := noOfShards numInterceptorPeerChanges := 1 numInterceptorMetachainHeaders := 1 totalInterceptors := numInterceptorTxs + numInterceptorHeaders + numInterceptorMiniBlocks + - numInterceptorPeerChanges + numInterceptorMetachainHeaders + numInterceptorTxs + numInterceptorPeerChanges + numInterceptorMetachainHeaders + numInterceptorsUnsignedTxs + + numInterceptorsRewardTxs assert.Equal(t, totalInterceptors, container.Len()) } diff --git a/process/factory/shard/intermediateProcessorsContainerFactory.go b/process/factory/shard/intermediateProcessorsContainerFactory.go index 87d27a55ffa..0e3df3ae758 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory.go @@ -8,16 +8,20 @@ import ( "github.com/ElrondNetwork/elrond-go/marshal" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block/preprocess" + "github.com/ElrondNetwork/elrond-go/process/economics" "github.com/ElrondNetwork/elrond-go/process/factory/containers" "github.com/ElrondNetwork/elrond-go/sharding" ) type intermediateProcessorsContainerFactory struct { - shardCoordinator sharding.Coordinator - marshalizer marshal.Marshalizer - hasher hashing.Hasher - addrConverter 
state.AddressConverter - store dataRetriever.StorageService + shardCoordinator sharding.Coordinator + marshalizer marshal.Marshalizer + hasher hashing.Hasher + addrConverter state.AddressConverter + specialAddressHandler process.SpecialAddressHandler + store dataRetriever.StorageService + poolsHolder dataRetriever.PoolsHolder + economics *economics.EconomicsData } // NewIntermediateProcessorsContainerFactory is responsible for creating a new intermediate processors factory object @@ -26,7 +30,10 @@ func NewIntermediateProcessorsContainerFactory( marshalizer marshal.Marshalizer, hasher hashing.Hasher, addrConverter state.AddressConverter, + specialAddressHandler process.SpecialAddressHandler, store dataRetriever.StorageService, + poolsHolder dataRetriever.PoolsHolder, + economics *economics.EconomicsData, ) (*intermediateProcessorsContainerFactory, error) { if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { @@ -41,16 +48,25 @@ func NewIntermediateProcessorsContainerFactory( if addrConverter == nil || addrConverter.IsInterfaceNil() { return nil, process.ErrNilAddressConverter } + if specialAddressHandler == nil || specialAddressHandler.IsInterfaceNil() { + return nil, process.ErrNilSpecialAddressHandler + } if store == nil || store.IsInterfaceNil() { return nil, process.ErrNilStorage } + if poolsHolder == nil || poolsHolder.IsInterfaceNil() { + return nil, process.ErrNilPoolsHolder + } return &intermediateProcessorsContainerFactory{ - shardCoordinator: shardCoordinator, - marshalizer: marshalizer, - hasher: hasher, - addrConverter: addrConverter, - store: store, + shardCoordinator: shardCoordinator, + marshalizer: marshalizer, + hasher: hasher, + addrConverter: addrConverter, + specialAddressHandler: specialAddressHandler, + store: store, + poolsHolder: poolsHolder, + economics: economics, }, nil } @@ -68,6 +84,16 @@ func (ppcm *intermediateProcessorsContainerFactory) Create() (process.Intermedia return nil, err } + interproc, err = ppcm.createRewardsTxIntermediateProcessor() + if err != nil { + return nil, err + } + + err = container.Add(block.RewardsBlock, interproc) + if err != nil { + return nil, err + } + return container, nil } @@ -84,6 +110,21 @@ func (ppcm *intermediateProcessorsContainerFactory) createSmartContractResultsIn return irp, err } +func (ppcm *intermediateProcessorsContainerFactory) createRewardsTxIntermediateProcessor() (process.IntermediateTransactionHandler, error) { + irp, err := preprocess.NewRewardTxHandler( + ppcm.specialAddressHandler, + ppcm.hasher, + ppcm.marshalizer, + ppcm.shardCoordinator, + ppcm.addrConverter, + ppcm.store, + ppcm.poolsHolder.RewardTransactions(), + ppcm.economics, + ) + + return irp, err +} + // IsInterfaceNil returns true if there is no value under the interface func (ppcm *intermediateProcessorsContainerFactory) IsInterfaceNil() bool { if ppcm == nil { diff --git a/process/factory/shard/intermediateProcessorsContainerFactory_test.go b/process/factory/shard/intermediateProcessorsContainerFactory_test.go index e49063ee632..e32d70e79c9 100644 --- a/process/factory/shard/intermediateProcessorsContainerFactory_test.go +++ b/process/factory/shard/intermediateProcessorsContainerFactory_test.go @@ -1,7 +1,9 @@ -package shard +package shard_test import ( "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/economics" + "github.com/ElrondNetwork/elrond-go/process/factory/shard" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/stretchr/testify/assert" "testing" @@ -10,12 +12,16 @@ 
import ( func TestNewIntermediateProcessorsContainerFactory_NilShardCoord(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( nil, &mock.MarshalizerMock{}, &mock.HasherMock{}, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + dPool, + &economics.EconomicsData{}, ) assert.Nil(t, ipcf) @@ -25,12 +31,16 @@ func TestNewIntermediateProcessorsContainerFactory_NilShardCoord(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory_NilMarshalizer(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), nil, &mock.HasherMock{}, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + dPool, + &economics.EconomicsData{}, ) assert.Nil(t, ipcf) @@ -40,12 +50,16 @@ func TestNewIntermediateProcessorsContainerFactory_NilMarshalizer(t *testing.T) func TestNewIntermediateProcessorsContainerFactory_NilHasher(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), &mock.MarshalizerMock{}, nil, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + dPool, + &economics.EconomicsData{}, ) assert.Nil(t, ipcf) @@ -55,12 +69,16 @@ func TestNewIntermediateProcessorsContainerFactory_NilHasher(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory_NilAdrConv(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), &mock.MarshalizerMock{}, &mock.HasherMock{}, nil, + &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + dPool, + &economics.EconomicsData{}, ) assert.Nil(t, ipcf) @@ -70,12 +88,16 @@ func TestNewIntermediateProcessorsContainerFactory_NilAdrConv(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory_NilStorer(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), &mock.MarshalizerMock{}, &mock.HasherMock{}, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, nil, + dPool, + &economics.EconomicsData{}, ) assert.Nil(t, ipcf) @@ -85,12 +107,16 @@ func TestNewIntermediateProcessorsContainerFactory_NilStorer(t *testing.T) { func TestNewIntermediateProcessorsContainerFactory(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), &mock.MarshalizerMock{}, &mock.HasherMock{}, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + dPool, + &economics.EconomicsData{}, ) assert.Nil(t, err) @@ -100,12 +126,16 @@ func TestNewIntermediateProcessorsContainerFactory(t *testing.T) { func TestIntermediateProcessorsContainerFactory_Create(t *testing.T) { t.Parallel() - ipcf, err := NewIntermediateProcessorsContainerFactory( + dPool := createDataPools() + ipcf, err := 
shard.NewIntermediateProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), &mock.MarshalizerMock{}, &mock.HasherMock{}, &mock.AddressConverterMock{}, + &mock.SpecialAddressHandlerMock{}, &mock.ChainStorerMock{}, + dPool, + &economics.EconomicsData{}, ) assert.Nil(t, err) @@ -113,5 +143,5 @@ func TestIntermediateProcessorsContainerFactory_Create(t *testing.T) { container, err := ipcf.Create() assert.Nil(t, err) - assert.Equal(t, 1, container.Len()) + assert.Equal(t, 2, container.Len()) } diff --git a/process/factory/shard/preProcessorsContainerFactory.go b/process/factory/shard/preProcessorsContainerFactory.go index 1c4049089b4..c86f7752b3c 100644 --- a/process/factory/shard/preProcessorsContainerFactory.go +++ b/process/factory/shard/preProcessorsContainerFactory.go @@ -13,17 +13,20 @@ import ( ) type preProcessorsContainerFactory struct { - shardCoordinator sharding.Coordinator - store dataRetriever.StorageService - marshalizer marshal.Marshalizer - hasher hashing.Hasher - dataPool dataRetriever.PoolsHolder - addrConverter state.AddressConverter - txProcessor process.TransactionProcessor - scProcessor process.SmartContractProcessor - scResultProcessor process.SmartContractResultProcessor - accounts state.AccountsAdapter - requestHandler process.RequestHandler + shardCoordinator sharding.Coordinator + store dataRetriever.StorageService + marshalizer marshal.Marshalizer + hasher hashing.Hasher + dataPool dataRetriever.PoolsHolder + addrConverter state.AddressConverter + txProcessor process.TransactionProcessor + scProcessor process.SmartContractProcessor + scResultProcessor process.SmartContractResultProcessor + rewardsTxProcessor process.RewardTransactionProcessor + accounts state.AccountsAdapter + requestHandler process.RequestHandler + rewardsProducer process.InternalTransactionProducer + economicsFee process.FeeHandler } // NewPreProcessorsContainerFactory is responsible for creating a new preProcessors factory object @@ -39,6 +42,9 @@ func NewPreProcessorsContainerFactory( txProcessor process.TransactionProcessor, scProcessor process.SmartContractProcessor, scResultProcessor process.SmartContractResultProcessor, + rewardsTxProcessor process.RewardTransactionProcessor, + rewardsProducer process.InternalTransactionProducer, + economicsFee process.FeeHandler, ) (*preProcessorsContainerFactory, error) { if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { @@ -71,22 +77,34 @@ func NewPreProcessorsContainerFactory( if scResultProcessor == nil || scResultProcessor.IsInterfaceNil() { return nil, process.ErrNilSmartContractResultProcessor } + if rewardsTxProcessor == nil || rewardsTxProcessor.IsInterfaceNil() { + return nil, process.ErrNilRewardsTxProcessor + } if requestHandler == nil || requestHandler.IsInterfaceNil() { return nil, process.ErrNilRequestHandler } + if rewardsProducer == nil || rewardsProducer.IsInterfaceNil() { + return nil, process.ErrNilInternalTransactionProducer + } + if economicsFee == nil || economicsFee.IsInterfaceNil() { + return nil, process.ErrNilEconomicsFeeHandler + } return &preProcessorsContainerFactory{ - shardCoordinator: shardCoordinator, - store: store, - marshalizer: marshalizer, - hasher: hasher, - dataPool: dataPool, - addrConverter: addrConverter, - txProcessor: txProcessor, - accounts: accounts, - scProcessor: scProcessor, - scResultProcessor: scResultProcessor, - requestHandler: requestHandler, + shardCoordinator: shardCoordinator, + store: store, + marshalizer: marshalizer, + hasher: hasher, + dataPool: dataPool, + 
addrConverter: addrConverter, + txProcessor: txProcessor, + accounts: accounts, + scProcessor: scProcessor, + scResultProcessor: scResultProcessor, + rewardsTxProcessor: rewardsTxProcessor, + requestHandler: requestHandler, + rewardsProducer: rewardsProducer, + economicsFee: economicsFee, }, nil } @@ -114,6 +132,16 @@ func (ppcm *preProcessorsContainerFactory) Create() (process.PreProcessorsContai return nil, err } + preproc, err = ppcm.createRewardsTransactionPreProcessor() + if err != nil { + return nil, err + } + + err = container.Add(block.RewardsBlock, preproc) + if err != nil { + return nil, err + } + return container, nil } @@ -127,6 +155,7 @@ func (ppcm *preProcessorsContainerFactory) createTxPreProcessor() (process.PrePr ppcm.shardCoordinator, ppcm.accounts, ppcm.requestHandler.RequestTransaction, + ppcm.economicsFee, ) return txPreprocessor, err @@ -147,6 +176,22 @@ func (ppcm *preProcessorsContainerFactory) createSmartContractResultPreProcessor return scrPreprocessor, err } +func (ppcm *preProcessorsContainerFactory) createRewardsTransactionPreProcessor() (process.PreProcessor, error) { + rewardTxPreprocessor, err := preprocess.NewRewardTxPreprocessor( + ppcm.dataPool.RewardTransactions(), + ppcm.store, + ppcm.hasher, + ppcm.marshalizer, + ppcm.rewardsTxProcessor, + ppcm.rewardsProducer, + ppcm.shardCoordinator, + ppcm.accounts, + ppcm.requestHandler.RequestRewardTransactions, + ) + + return rewardTxPreprocessor, err +} + // IsInterfaceNil returns true if there is no value under the interface func (ppcm *preProcessorsContainerFactory) IsInterfaceNil() bool { if ppcm == nil { diff --git a/process/factory/shard/preProcessorsContainerFactory_test.go b/process/factory/shard/preProcessorsContainerFactory_test.go index 9a21fb18740..518c8c5d9c0 100644 --- a/process/factory/shard/preProcessorsContainerFactory_test.go +++ b/process/factory/shard/preProcessorsContainerFactory_test.go @@ -1,11 +1,12 @@ package shard import ( + "testing" + "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/stretchr/testify/assert" - "testing" ) func TestNewPreProcessorsContainerFactory_NilShardCoordinator(t *testing.T) { @@ -16,13 +17,16 @@ func TestNewPreProcessorsContainerFactory_NilShardCoordinator(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -37,13 +41,16 @@ func TestNewPreProcessorsContainerFactory_NilStore(t *testing.T) { nil, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilStore, err) @@ -58,13 +65,16 @@ func TestNewPreProcessorsContainerFactory_NilMarshalizer(t *testing.T) { &mock.ChainStorerMock{}, nil, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, 
&mock.AccountsStub{}, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -79,13 +89,16 @@ func TestNewPreProcessorsContainerFactory_NilHasher(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, nil, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilHasher, err) @@ -107,6 +120,9 @@ func TestNewPreProcessorsContainerFactory_NilDataPool(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilDataPoolHolder, err) @@ -121,13 +137,16 @@ func TestNewPreProcessorsContainerFactory_NilAddrConv(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), nil, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilAddressConverter, err) @@ -142,13 +161,16 @@ func TestNewPreProcessorsContainerFactory_NilAccounts(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, nil, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -163,13 +185,16 @@ func TestNewPreProcessorsContainerFactory_NilTxProcessor(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, nil, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilTxProcessor, err) @@ -184,13 +209,16 @@ func TestNewPreProcessorsContainerFactory_NilSCProcessor(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{}, nil, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilSmartContractProcessor, err) @@ -205,19 +233,46 @@ func TestNewPreProcessorsContainerFactory_NilSCR(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, 
&mock.AccountsStub{}, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, nil, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilSmartContractResultProcessor, err) assert.Nil(t, ppcm) } +func TestNewPreProcessorsContainerFactory_NilRewardTxProcessor(t *testing.T) { + t.Parallel() + + ppcm, err := NewPreProcessorsContainerFactory( + mock.NewMultiShardsCoordinatorMock(3), + &mock.ChainStorerMock{}, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + mock.NewPoolsHolderMock(), + &mock.AddressConverterMock{}, + &mock.AccountsStub{}, + &mock.RequestHandlerMock{}, + &mock.TxProcessorMock{}, + &mock.SCProcessorMock{}, + &mock.SmartContractResultsProcessorMock{}, + nil, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, + ) + + assert.Equal(t, process.ErrNilRewardsTxProcessor, err) + assert.Nil(t, ppcm) +} + func TestNewPreProcessorsContainerFactory_NilRequestHandler(t *testing.T) { t.Parallel() @@ -226,13 +281,16 @@ func TestNewPreProcessorsContainerFactory_NilRequestHandler(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, nil, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Equal(t, process.ErrNilRequestHandler, err) @@ -247,13 +305,16 @@ func TestNewPreProcessorsContainerFactory(t *testing.T) { &mock.ChainStorerMock{}, &mock.MarshalizerMock{}, &mock.HasherMock{}, - mock.NewPoolsHolderFake(), + mock.NewPoolsHolderMock(), &mock.AddressConverterMock{}, &mock.AccountsStub{}, &mock.RequestHandlerMock{}, &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Nil(t, err) @@ -279,6 +340,9 @@ func TestPreProcessorsContainerFactory_CreateErrTxPreproc(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Nil(t, err) @@ -314,6 +378,9 @@ func TestPreProcessorsContainerFactory_CreateErrScrPreproc(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Nil(t, err) @@ -339,6 +406,12 @@ func TestPreProcessorsContainerFactory_Create(t *testing.T) { }, } } + dataPool.RewardTransactionsCalled = func() dataRetriever.ShardedDataCacherNotifier { + return &mock.ShardedDataStub{ + RegisterHandlerCalled: func(i func(key []byte)) { + }, + } + } ppcm, err := NewPreProcessorsContainerFactory( mock.NewMultiShardsCoordinatorMock(3), @@ -352,12 +425,15 @@ func TestPreProcessorsContainerFactory_Create(t *testing.T) { &mock.TxProcessorMock{}, &mock.SCProcessorMock{}, &mock.SmartContractResultsProcessorMock{}, + &mock.RewardTxProcessorMock{}, + &mock.IntermediateTransactionHandlerMock{}, + &mock.FeeHandlerStub{}, ) assert.Nil(t, err) assert.NotNil(t, ppcm) container, err := ppcm.Create() - assert.Equal(t, 2, container.Len()) assert.Nil(t, err) + assert.Equal(t, 3, container.Len()) } diff --git a/process/interface.go 
b/process/interface.go index 4196fb1f7dd..db8127c07a4 100644 --- a/process/interface.go +++ b/process/interface.go @@ -7,6 +7,7 @@ import ( "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/block" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" "github.com/ElrondNetwork/elrond-go/data/smartContractResult" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" @@ -14,7 +15,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/storage" - vmcommon "github.com/ElrondNetwork/elrond-vm-common" + "github.com/ElrondNetwork/elrond-vm-common" ) // TransactionProcessor is the main interface for transaction execution engine @@ -23,6 +24,18 @@ type TransactionProcessor interface { IsInterfaceNil() bool } +// RewardTransactionProcessor is the interface for reward transaction execution engine +type RewardTransactionProcessor interface { + ProcessRewardTransaction(rewardTx *rewardTx.RewardTx) error + IsInterfaceNil() bool +} + +// RewardTransactionPreProcessor prepares the processing of reward transactions +type RewardTransactionPreProcessor interface { + AddComputedRewardMiniBlocks(computedRewardMiniblocks block.MiniBlockSlice) + IsInterfaceNil() bool +} + // SmartContractResultProcessor is the main interface for smart contract result execution engine type SmartContractResultProcessor interface { ProcessSmartContractResult(scr *smartContractResult.SmartContractResult) error @@ -55,13 +68,13 @@ type TransactionCoordinator interface { IsDataPreparedForProcessing(haveTime func() time.Duration) error SaveBlockDataToStorage(body block.Body) error - RestoreBlockDataFromStorage(body block.Body) (int, map[int][][]byte, error) + RestoreBlockDataFromStorage(body block.Body) (int, error) RemoveBlockDataFromPool(body block.Body) error ProcessBlockTransaction(body block.Body, round uint64, haveTime func() time.Duration) error CreateBlockStarted() - CreateMbsAndProcessCrossShardTransactionsDstMe(header data.HeaderHandler, maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, uint32, bool) + CreateMbsAndProcessCrossShardTransactionsDstMe(header data.HeaderHandler, processedMiniBlocksHashes map[string]struct{}, maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, uint32, bool) CreateMbsAndProcessTransactionsFromMe(maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) block.MiniBlockSlice CreateMarshalizedData(body block.Body) (map[uint32]block.MiniBlockSlice, map[string][][]byte) @@ -92,16 +105,50 @@ type IntermediateTransactionHandler interface { IsInterfaceNil() bool } +// InternalTransactionProducer creates system transactions (e.g. 
rewards) +type InternalTransactionProducer interface { + CreateAllInterMiniBlocks() map[uint32]*block.MiniBlock + IsInterfaceNil() bool +} + +// TransactionVerifier interface validates if the transaction is good and if it should be processed +type TransactionVerifier interface { + IsTransactionValid(tx data.TransactionHandler) error +} + +// TransactionFeeHandler processes the transaction fee +type TransactionFeeHandler interface { + ProcessTransactionFee(cost *big.Int) + IsInterfaceNil() bool +} + +// SpecialAddressHandler responds with needed special addresses +type SpecialAddressHandler interface { + SetShardConsensusData(randomness []byte, round uint64, epoch uint32, shardID uint32) error + SetMetaConsensusData(randomness []byte, round uint64, epoch uint32) error + ConsensusShardRewardData() *data.ConsensusRewardData + ConsensusMetaRewardData() []*data.ConsensusRewardData + ClearMetaConsensusData() + ElrondCommunityAddress() []byte + LeaderAddress() []byte + BurnAddress() []byte + SetElrondCommunityAddress(elrond []byte) + ShardIdForAddress([]byte) (uint32, error) + Epoch() uint32 + Round() uint64 + IsInterfaceNil() bool +} + // PreProcessor is an interface used to prepare and process transaction data type PreProcessor interface { CreateBlockStarted() IsDataPrepared(requestedTxs int, haveTime func() time.Duration) error RemoveTxBlockFromPools(body block.Body, miniBlockPool storage.Cacher) error - RestoreTxBlockIntoPools(body block.Body, miniBlockPool storage.Cacher) (int, map[int][]byte, error) + RestoreTxBlockIntoPools(body block.Body, miniBlockPool storage.Cacher) (int, error) SaveTxBlockToStorage(body block.Body) error - ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error + ProcessBlockTransactions(body block.Body, round uint64, haveTime func() bool) error RequestBlockTransactions(body block.Body) int CreateMarshalizedData(txHashes [][]byte) ([][]byte, error) @@ -109,6 +156,7 @@ type PreProcessor interface { RequestTransactionsForMiniBlock(mb block.MiniBlock) int ProcessMiniBlock(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) + CreateAndProcessMiniBlocks(maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, error) GetAllCurrentUsedTxs() map[string]data.TransactionHandler IsInterfaceNil() bool @@ -126,6 +174,7 @@ type BlockProcessor interface { DecodeBlockBody(dta []byte) data.BodyHandler DecodeBlockHeader(dta []byte) data.HeaderHandler AddLastNotarizedHdr(shardId uint32, processedHdr data.HeaderHandler) + SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) IsInterfaceNil() bool } @@ -286,34 +335,18 @@ type TopicMessageHandler interface { TopicHandler } -// ChronologyValidator defines the functionality needed to validate a received header block (shard or metachain) -// from chronology point of view -type ChronologyValidator interface { - ValidateReceivedBlock(shardID uint32, epoch uint32, nonce uint64, round uint64) error - IsInterfaceNil() bool -} - // DataPacker can split a large slice of byte slices in smaller packets type DataPacker interface { PackDataInChunks(data [][]byte, limit int) ([][]byte, error) IsInterfaceNil() bool } -// BlocksTracker defines the functionality to track all the notarised blocks -type BlocksTracker interface { - UnnotarisedBlocks() []data.HeaderHandler - 
RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error - AddBlock(headerHandler data.HeaderHandler) - SetBlockBroadcastRound(nonce uint64, round int64) - BlockBroadcastRound(nonce uint64) int64 - IsInterfaceNil() bool -} - // RequestHandler defines the methods through which request to data can be made type RequestHandler interface { RequestHeaderByNonce(shardId uint32, nonce uint64) RequestTransaction(shardId uint32, txHashes [][]byte) RequestUnsignedTransactions(destShardID uint32, scrHashes [][]byte) + RequestRewardTransactions(destShardID uint32, txHashes [][]byte) RequestMiniBlock(shardId uint32, miniblockHash []byte) RequestHeader(shardId uint32, hash []byte) IsInterfaceNil() bool @@ -358,3 +391,41 @@ type TxValidatorHandler interface { SenderAddress() state.AddressContainer TotalValue() *big.Int } + +// PoolsCleaner define the functionality that is needed for a pools cleaner +type PoolsCleaner interface { + Clean(duration time.Duration) (bool, error) + NumRemovedTxs() uint64 + IsInterfaceNil() bool +} + +// InterceptorThrottler can determine if a new go routine can start +type InterceptorThrottler interface { + CanProcess() bool + StartProcessing() + EndProcessing() + IsInterfaceNil() bool +} + +// RewardsHandler will return information about rewards +type RewardsHandler interface { + RewardsValue() *big.Int + CommunityPercentage() float64 + LeaderPercentage() float64 + BurnPercentage() float64 + IsInterfaceNil() bool +} + +// FeeHandler will return information about fees +type FeeHandler interface { + MinGasPrice() uint64 + MinGasLimit() uint64 + IsInterfaceNil() bool +} + +// EconomicsAddressesHandler will return information about economics addresses +type EconomicsAddressesHandler interface { + CommunityAddress() string + BurnAddress() string + IsInterfaceNil() bool +} diff --git a/process/mock/blockProcessorMock.go b/process/mock/blockProcessorMock.go index 9c764a288e0..100e94f5e9b 100644 --- a/process/mock/blockProcessorMock.go +++ b/process/mock/blockProcessorMock.go @@ -67,6 +67,10 @@ func (blProcMock BlockProcessorMock) AddLastNotarizedHdr(shardId uint32, process blProcMock.AddLastNotarizedHdrCalled(shardId, processedHdr) } +func (blProcMock BlockProcessorMock) SetConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) { + panic("implement me") +} + // IsInterfaceNil returns true if there is no value under the interface func (blProcMock *BlockProcessorMock) IsInterfaceNil() bool { if blProcMock == nil { diff --git a/process/mock/blocksTrackerMock.go b/process/mock/blocksTrackerMock.go deleted file mode 100644 index 864fadad627..00000000000 --- a/process/mock/blocksTrackerMock.go +++ /dev/null @@ -1,41 +0,0 @@ -package mock - -import ( - "github.com/ElrondNetwork/elrond-go/data" -) - -type BlocksTrackerMock struct { - UnnotarisedBlocksCalled func() []data.HeaderHandler - RemoveNotarisedBlocksCalled func(headerHandler data.HeaderHandler) error - AddBlockCalled func(headerHandler data.HeaderHandler) - SetBlockBroadcastRoundCalled func(nonce uint64, round int64) - BlockBroadcastRoundCalled func(nonce uint64) int64 -} - -func (btm *BlocksTrackerMock) UnnotarisedBlocks() []data.HeaderHandler { - return btm.UnnotarisedBlocksCalled() -} - -func (btm *BlocksTrackerMock) RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error { - return btm.RemoveNotarisedBlocksCalled(headerHandler) -} - -func (btm *BlocksTrackerMock) AddBlock(headerHandler data.HeaderHandler) { - btm.AddBlockCalled(headerHandler) -} - -func (btm *BlocksTrackerMock) 
SetBlockBroadcastRound(nonce uint64, round int64) { - btm.SetBlockBroadcastRoundCalled(nonce, round) -} - -func (btm *BlocksTrackerMock) BlockBroadcastRound(nonce uint64) int64 { - return btm.BlockBroadcastRoundCalled(nonce) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (btm *BlocksTrackerMock) IsInterfaceNil() bool { - if btm == nil { - return true - } - return false -} diff --git a/process/mock/chronologyValidatorStub.go b/process/mock/chronologyValidatorStub.go deleted file mode 100644 index fbb258ae219..00000000000 --- a/process/mock/chronologyValidatorStub.go +++ /dev/null @@ -1,17 +0,0 @@ -package mock - -type ChronologyValidatorStub struct { - ValidateReceivedBlockCalled func(shardID uint32, epoch uint32, nonce uint64, round uint64) error -} - -func (cvs *ChronologyValidatorStub) ValidateReceivedBlock(shardID uint32, epoch uint32, nonce uint64, round uint64) error { - return cvs.ValidateReceivedBlockCalled(shardID, epoch, nonce, round) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (cvs *ChronologyValidatorStub) IsInterfaceNil() bool { - if cvs == nil { - return true - } - return false -} diff --git a/process/mock/feeHandlerStub.go b/process/mock/feeHandlerStub.go new file mode 100644 index 00000000000..f6d983310e7 --- /dev/null +++ b/process/mock/feeHandlerStub.go @@ -0,0 +1,27 @@ +package mock + +type FeeHandlerStub struct { + MinGasPriceCalled func() uint64 + MinGasLimitCalled func() uint64 + MinTxFeeCalled func() uint64 +} + +func (fhs *FeeHandlerStub) MinGasPrice() uint64 { + return fhs.MinGasPriceCalled() +} + +func (fhs *FeeHandlerStub) MinGasLimit() uint64 { + return fhs.MinGasLimitCalled() +} + +func (fhs *FeeHandlerStub) MinTxFee() uint64 { + return fhs.MinTxFeeCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (fhs *FeeHandlerStub) IsInterfaceNil() bool { + if fhs == nil { + return true + } + return false +} diff --git a/process/mock/headerHandlerStub.go b/process/mock/headerHandlerStub.go deleted file mode 100644 index 41f496d4bd9..00000000000 --- a/process/mock/headerHandlerStub.go +++ /dev/null @@ -1,125 +0,0 @@ -package mock - -type HeaderHandlerStub struct { - GetMiniBlockHeadersWithDstCalled func(destId uint32) map[string]uint32 -} - -func (hhs *HeaderHandlerStub) GetShardID() uint32 { - return 1 -} - -func (hhs *HeaderHandlerStub) GetNonce() uint64 { - return 1 -} - -func (hhs *HeaderHandlerStub) GetEpoch() uint32 { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) GetRound() uint64 { - return 1 -} - -func (hhs *HeaderHandlerStub) GetTimeStamp() uint64 { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) GetRootHash() []byte { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) GetPrevHash() []byte { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) GetPrevRandSeed() []byte { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) GetRandSeed() []byte { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) GetPubKeysBitmap() []byte { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) GetSignature() []byte { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) GetTxCount() uint32 { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetNonce(n uint64) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetEpoch(e uint32) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetRound(r uint64) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) 
SetTimeStamp(ts uint64) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetRootHash(rHash []byte) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetPrevHash(pvHash []byte) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetPrevRandSeed(pvRandSeed []byte) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetRandSeed(randSeed []byte) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetPubKeysBitmap(pkbm []byte) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetSignature(sg []byte) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetTxCount(txCount uint32) { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) GetMiniBlockHeadersWithDst(destId uint32) map[string]uint32 { - return hhs.GetMiniBlockHeadersWithDstCalled(destId) -} - -func (hhs *HeaderHandlerStub) GetMiniBlockProcessed(hash []byte) bool { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) SetMiniBlockProcessed(hash []byte, processed bool) { - panic("implement me") -} - -// IsInterfaceNil returns true if there is no value under the interface -func (hhs *HeaderHandlerStub) IsInterfaceNil() bool { - if hhs == nil { - return true - } - return false -} - -func (hhs *HeaderHandlerStub) ItemsInHeader() uint32 { - panic("implement me") -} - -func (hhs *HeaderHandlerStub) ItemsInBody() uint32 { - panic("implement me") -} diff --git a/process/mock/indexerMock.go b/process/mock/indexerMock.go index 378e0f44f85..b9fe13016ac 100644 --- a/process/mock/indexerMock.go +++ b/process/mock/indexerMock.go @@ -1,6 +1,7 @@ package mock import ( + "github.com/ElrondNetwork/elrond-go/core/indexer" "github.com/ElrondNetwork/elrond-go/core/statistics" "github.com/ElrondNetwork/elrond-go/data" ) @@ -10,16 +11,28 @@ type IndexerMock struct { SaveBlockCalled func(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler) } -func (im *IndexerMock) SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler) { +func (im *IndexerMock) SaveBlock(body data.BodyHandler, header data.HeaderHandler, txPool map[string]data.TransactionHandler, signersIndexes []uint64) { if im.SaveBlockCalled != nil { im.SaveBlockCalled(body, header, txPool) } } +func (im *IndexerMock) SaveMetaBlock(header data.HeaderHandler, signersIndexes []uint64) { + return +} + func (im *IndexerMock) UpdateTPS(tpsBenchmark statistics.TPSBenchmark) { panic("implement me") } +func (im *IndexerMock) SaveRoundInfo(roundInfo indexer.RoundInfo) { + return +} + +func (im *IndexerMock) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte) { + panic("implement me") +} + // IsInterfaceNil returns true if there is no value under the interface func (im *IndexerMock) IsInterfaceNil() bool { if im == nil { @@ -27,3 +40,7 @@ func (im *IndexerMock) IsInterfaceNil() bool { } return false } + +func (im *IndexerMock) IsNilIndexer() bool { + return true +} diff --git a/process/mock/interceptorThrottlerStub.go b/process/mock/interceptorThrottlerStub.go new file mode 100644 index 00000000000..a2013e23adc --- /dev/null +++ b/process/mock/interceptorThrottlerStub.go @@ -0,0 +1,36 @@ +package mock + +import "sync/atomic" + +type InterceptorThrottlerStub struct { + CanProcessCalled func() bool + startProcessingCount int32 + endProcessingCount int32 +} + +func (it *InterceptorThrottlerStub) CanProcess() bool { + return it.CanProcessCalled() +} + +func (it *InterceptorThrottlerStub) StartProcessing() { + 
atomic.AddInt32(&it.startProcessingCount, 1) +} + +func (it *InterceptorThrottlerStub) EndProcessing() { + atomic.AddInt32(&it.endProcessingCount, 1) +} + +func (it *InterceptorThrottlerStub) StartProcessingCount() int32 { + return atomic.LoadInt32(&it.startProcessingCount) +} + +func (it *InterceptorThrottlerStub) EndProcessingCount() int32 { + return atomic.LoadInt32(&it.endProcessingCount) +} + +func (it *InterceptorThrottlerStub) IsInterfaceNil() bool { + if it == nil { + return true + } + return false +} diff --git a/process/mock/intermediateTransactionHandlerMock.go b/process/mock/intermediateTransactionHandlerMock.go index f78983821ae..75ea84b3276 100644 --- a/process/mock/intermediateTransactionHandlerMock.go +++ b/process/mock/intermediateTransactionHandlerMock.go @@ -52,7 +52,7 @@ func (ith *IntermediateTransactionHandlerMock) SaveCurrentIntermediateTxToStorag func (ith *IntermediateTransactionHandlerMock) CreateBlockStarted() { if ith.CreateBlockStartedCalled != nil { - ith.CreateAllInterMiniBlocksCalled() + ith.CreateBlockStartedCalled() } } diff --git a/process/mock/miniBlocksResolverMock.go b/process/mock/miniBlocksResolverMock.go index 8b2a5a64518..9dc3364aa95 100644 --- a/process/mock/miniBlocksResolverMock.go +++ b/process/mock/miniBlocksResolverMock.go @@ -9,7 +9,8 @@ type MiniBlocksResolverMock struct { RequestDataFromHashCalled func(hash []byte) error RequestDataFromHashArrayCalled func(hashes [][]byte) error ProcessReceivedMessageCalled func(message p2p.MessageP2P) error - GetMiniBlocksCalled func(hashes [][]byte) block.MiniBlockSlice + GetMiniBlocksCalled func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) + GetMiniBlocksFromPoolCalled func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) } func (hrm *MiniBlocksResolverMock) RequestDataFromHash(hash []byte) error { @@ -24,10 +25,14 @@ func (hrm *MiniBlocksResolverMock) ProcessReceivedMessage(message p2p.MessageP2P return hrm.ProcessReceivedMessageCalled(message) } -func (hrm *MiniBlocksResolverMock) GetMiniBlocks(hashes [][]byte) block.MiniBlockSlice { +func (hrm *MiniBlocksResolverMock) GetMiniBlocks(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { return hrm.GetMiniBlocksCalled(hashes) } +func (hrm *MiniBlocksResolverMock) GetMiniBlocksFromPool(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + return hrm.GetMiniBlocksFromPoolCalled(hashes) +} + // IsInterfaceNil returns true if there is no value under the interface func (hrm *MiniBlocksResolverMock) IsInterfaceNil() bool { if hrm == nil { diff --git a/process/mock/multiSigMock.go b/process/mock/multiSigMock.go index 453660e848f..7b3c2bc8633 100644 --- a/process/mock/multiSigMock.go +++ b/process/mock/multiSigMock.go @@ -1,6 +1,8 @@ package mock import ( + "bytes" + "github.com/ElrondNetwork/elrond-go/crypto" "github.com/ElrondNetwork/elrond-go/hashing" ) @@ -77,13 +79,28 @@ func (bnm *BelNevMock) SetAggregatedSig(aggSig []byte) error { // Verify returns nil if the aggregateed signature is verified for the given public keys func (bnm *BelNevMock) Verify(msg []byte, bitmap []byte) error { - return bnm.VerifyMock(msg, bitmap) + if bnm.VerifyMock != nil { + return bnm.VerifyMock(msg, bitmap) + } + + if msg == nil { + return crypto.ErrNilMessage + } + + if bitmap == nil { + return crypto.ErrNilBitmap + } + + return nil } // CreateCommitment creates a secret commitment and the corresponding public commitment point func (bnm *BelNevMock) CreateCommitment() (commSecret []byte, commitment []byte) { + if bnm.CreateCommitmentMock != nil { + return 
bnm.CreateCommitmentMock() + } - return bnm.CreateCommitmentMock() + return []byte("commitment secret"), []byte("commitment") } // StoreCommitmentHash adds a commitment hash to the list on the specified position @@ -92,18 +109,18 @@ func (bnm *BelNevMock) StoreCommitmentHash(index uint16, commHash []byte) error bnm.commHash = commHash return nil - } else { - return bnm.StoreCommitmentHashMock(index, commHash) } + + return bnm.StoreCommitmentHashMock(index, commHash) } // CommitmentHash returns the commitment hash from the list on the specified position func (bnm *BelNevMock) CommitmentHash(index uint16) ([]byte, error) { if bnm.CommitmentHashMock == nil { return bnm.commHash, nil - } else { - return bnm.CommitmentHashMock(index) } + + return bnm.CommitmentHashMock(index) } // StoreCommitment adds a commitment to the list on the specified position @@ -116,9 +133,9 @@ func (bnm *BelNevMock) StoreCommitment(index uint16, value []byte) error { bnm.commitments[index] = value return nil - } else { - return bnm.StoreCommitmentMock(index, value) } + + return bnm.StoreCommitmentMock(index, value) } // Commitment returns the commitment from the list with the specified position @@ -129,19 +146,27 @@ func (bnm *BelNevMock) Commitment(index uint16) ([]byte, error) { } return bnm.commitments[index], nil - } else { - return bnm.CommitmentMock(index) } + + return bnm.CommitmentMock(index) } // AggregateCommitments aggregates the list of commitments func (bnm *BelNevMock) AggregateCommitments(bitmap []byte) error { - return bnm.AggregateCommitmentsMock(bitmap) + if bnm.AggregateCommitmentsMock != nil { + return bnm.AggregateCommitmentsMock(bitmap) + } + + return nil } // CreateSignatureShare creates a partial signature func (bnm *BelNevMock) CreateSignatureShare(msg []byte, bitmap []byte) ([]byte, error) { - return bnm.CreateSignatureShareMock(msg, bitmap) + if bnm.CreateSignatureShareMock != nil { + return bnm.CreateSignatureShareMock(msg, bitmap) + } + + return []byte("signature share"), nil } // StoreSignatureShare adds the partial signature of the signer with specified position @@ -156,12 +181,28 @@ func (bnm *BelNevMock) StoreSignatureShare(index uint16, sig []byte) error { // VerifySignatureShare verifies the partial signature of the signer with specified position func (bnm *BelNevMock) VerifySignatureShare(index uint16, sig []byte, msg []byte, bitmap []byte) error { - return bnm.VerifySignatureShareMock(index, sig, msg, bitmap) + if bnm.VerifySignatureShareMock != nil { + return bnm.VerifySignatureShareMock(index, sig, msg, bitmap) + } + + if bytes.Equal([]byte("signature share"), sig) { + return nil + } + + return crypto.ErrSigNotValid } // AggregateSigs aggregates all collected partial signatures func (bnm *BelNevMock) AggregateSigs(bitmap []byte) ([]byte, error) { - return bnm.AggregateSigsMock(bitmap) + if bnm.AggregateSigsMock != nil { + return bnm.AggregateSigsMock(bitmap) + } + + if bitmap == nil { + return nil, crypto.ErrNilBitmap + } + + return []byte("aggregated signature"), nil } // SignatureShare diff --git a/process/mock/nodesCoordinatorMock.go b/process/mock/nodesCoordinatorMock.go new file mode 100644 index 00000000000..72ee4c72df3 --- /dev/null +++ b/process/mock/nodesCoordinatorMock.go @@ -0,0 +1,193 @@ +package mock + +import ( + "bytes" + "fmt" + "math/big" + + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// NodesCoordinator defines the behaviour of a struct able to do validator group selection +type NodesCoordinatorMock struct { + Validators 
map[uint32][]sharding.Validator + ShardConsensusSize uint32 + MetaConsensusSize uint32 + ShardId uint32 + NbShards uint32 + GetSelectedPublicKeysCalled func(selection []byte, shardId uint32) (publicKeys []string, err error) + GetValidatorsPublicKeysCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddressesCalled func(randomness []byte, round uint64, shardId uint32) ([]string, error) + LoadNodesPerShardsCalled func(nodes map[uint32][]sharding.Validator) error + ComputeValidatorsGroupCalled func(randomness []byte, round uint64, shardId uint32) (validatorsGroup []sharding.Validator, err error) + GetValidatorWithPublicKeyCalled func(publicKey []byte) (validator sharding.Validator, shardId uint32, err error) +} + +func NewNodesCoordinatorMock() *NodesCoordinatorMock { + nbShards := uint32(1) + nodesPerShard := 2 + validatorsMap := make(map[uint32][]sharding.Validator) + + for sh := uint32(0); sh < nbShards; sh++ { + validatorsList := make([]sharding.Validator, nodesPerShard) + for v := 0; v < nodesPerShard; v++ { + validatorsList[v], _ = sharding.NewValidator( + big.NewInt(10), + 1, + []byte(fmt.Sprintf("pubKey%d%d", sh, v)), + []byte(fmt.Sprintf("address%d%d", sh, v)), + ) + } + validatorsMap[sh] = validatorsList + } + + return &NodesCoordinatorMock{ + ShardConsensusSize: 1, + MetaConsensusSize: 1, + ShardId: 0, + NbShards: nbShards, + Validators: validatorsMap, + } +} + +func (ncm *NodesCoordinatorMock) GetAllValidatorsPublicKeys() map[uint32][][]byte { + return nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsIndexes(publicKeys []string) []uint64 { + return nil +} + +func (ncm *NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { + if ncm.GetSelectedPublicKeysCalled != nil { + return ncm.GetSelectedPublicKeysCalled(selection, shardId) + } + + if len(ncm.Validators) == 0 { + return nil, sharding.ErrNilInputNodesMap + } + + pubKeys := make([]string, 0) + + for _, v := range ncm.Validators[shardId] { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsPublicKeys( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + valGrStr := make([]string, 0) + + for _, v := range validators { + valGrStr = append(valGrStr, string(v.PubKey())) + } + + return valGrStr, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorsRewardsAddresses( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + if ncm.GetValidatorsRewardsAddressesCalled != nil { + return ncm.GetValidatorsRewardsAddressesCalled(randomness, round, shardId) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + addresses := make([]string, 0) + for _, v := range validators { + addresses = append(addresses, string(v.Address())) + } + + return addresses, nil +} + +func (ncm *NodesCoordinatorMock) SetNodesPerShards(nodes map[uint32][]sharding.Validator) error { + if ncm.LoadNodesPerShardsCalled != nil { + return ncm.LoadNodesPerShardsCalled(nodes) + } + + if nodes == nil { + return sharding.ErrNilInputNodesMap + } + + ncm.Validators = nodes + + return nil +} + +func (ncm *NodesCoordinatorMock) 
ComputeValidatorsGroup( + randomess []byte, + round uint64, + shardId uint32, +) ([]sharding.Validator, error) { + var consensusSize uint32 + + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomess, round, shardId) + } + + if ncm.ShardId == sharding.MetachainShardId { + consensusSize = ncm.MetaConsensusSize + } else { + consensusSize = ncm.ShardConsensusSize + } + + if randomess == nil { + return nil, sharding.ErrNilRandomness + } + + validatorsGroup := make([]sharding.Validator, 0) + + for i := uint32(0); i < consensusSize; i++ { + validatorsGroup = append(validatorsGroup, ncm.Validators[shardId][i]) + } + + return validatorsGroup, nil +} + +func (ncm *NodesCoordinatorMock) GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { + if ncm.GetValidatorWithPublicKeyCalled != nil { + return ncm.GetValidatorWithPublicKeyCalled(publicKey) + } + + if publicKey == nil { + return nil, 0, sharding.ErrNilPubKey + } + + for shardId, shardEligible := range ncm.Validators { + for i := 0; i < len(shardEligible); i++ { + if bytes.Equal(publicKey, shardEligible[i].PubKey()) { + return shardEligible[i], shardId, nil + } + } + } + + return nil, 0, sharding.ErrValidatorNotFound +} + +func (ncm *NodesCoordinatorMock) IsInterfaceNil() bool { + if ncm == nil { + return true + } + return false +} diff --git a/process/mock/poolsHolderFake.go b/process/mock/poolsHolderMock.go similarity index 56% rename from process/mock/poolsHolderFake.go rename to process/mock/poolsHolderMock.go index 273a9d0f9c6..f3f9a30c576 100644 --- a/process/mock/poolsHolderFake.go +++ b/process/mock/poolsHolderMock.go @@ -9,9 +9,10 @@ import ( "github.com/ElrondNetwork/elrond-go/storage/storageUnit" ) -type PoolsHolderFake struct { +type PoolsHolderMock struct { transactions dataRetriever.ShardedDataCacherNotifier - unsignedtransactions dataRetriever.ShardedDataCacherNotifier + unsignedTransactions dataRetriever.ShardedDataCacherNotifier + rewardTransactions dataRetriever.ShardedDataCacherNotifier headers storage.Cacher metaBlocks storage.Cacher hdrNonces dataRetriever.Uint64SyncMapCacher @@ -20,10 +21,11 @@ type PoolsHolderFake struct { metaHdrNonces dataRetriever.Uint64SyncMapCacher } -func NewPoolsHolderFake() *PoolsHolderFake { - phf := &PoolsHolderFake{} +func NewPoolsHolderMock() *PoolsHolderMock { + phf := &PoolsHolderMock{} phf.transactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache}) - phf.unsignedtransactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache}) + phf.unsignedTransactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 10000, Type: storageUnit.LRUCache}) + phf.rewardTransactions, _ = shardedData.NewShardedData(storageUnit.CacheConfig{Size: 100, Type: storageUnit.LRUCache}) phf.headers, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) phf.metaBlocks, _ = storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) cacheHdrNonces, _ := storageUnit.NewCache(storageUnit.LRUCache, 10000, 1) @@ -41,48 +43,52 @@ func NewPoolsHolderFake() *PoolsHolderFake { return phf } -func (phf *PoolsHolderFake) Transactions() dataRetriever.ShardedDataCacherNotifier { - return phf.transactions +func (phm *PoolsHolderMock) Transactions() dataRetriever.ShardedDataCacherNotifier { + return phm.transactions } -func (phf *PoolsHolderFake) UnsignedTransactions() dataRetriever.ShardedDataCacherNotifier { - return phf.unsignedtransactions +func (phm 
*PoolsHolderMock) UnsignedTransactions() dataRetriever.ShardedDataCacherNotifier { + return phm.unsignedTransactions } -func (phf *PoolsHolderFake) Headers() storage.Cacher { - return phf.headers +func (phm *PoolsHolderMock) RewardTransactions() dataRetriever.ShardedDataCacherNotifier { + return phm.rewardTransactions } -func (phf *PoolsHolderFake) HeadersNonces() dataRetriever.Uint64SyncMapCacher { - return phf.hdrNonces +func (phm *PoolsHolderMock) Headers() storage.Cacher { + return phm.headers } -func (phf *PoolsHolderFake) MiniBlocks() storage.Cacher { - return phf.miniBlocks +func (phm *PoolsHolderMock) HeadersNonces() dataRetriever.Uint64SyncMapCacher { + return phm.hdrNonces } -func (phf *PoolsHolderFake) PeerChangesBlocks() storage.Cacher { - return phf.peerChangesBlocks +func (phm *PoolsHolderMock) MiniBlocks() storage.Cacher { + return phm.miniBlocks } -func (phf *PoolsHolderFake) MetaBlocks() storage.Cacher { - return phf.metaBlocks +func (phm *PoolsHolderMock) PeerChangesBlocks() storage.Cacher { + return phm.peerChangesBlocks } -func (phf *PoolsHolderFake) MetaHeadersNonces() dataRetriever.Uint64SyncMapCacher { - return phf.metaHdrNonces +func (phm *PoolsHolderMock) MetaBlocks() storage.Cacher { + return phm.metaBlocks } -func (phf *PoolsHolderFake) SetTransactions(transactions dataRetriever.ShardedDataCacherNotifier) { - phf.transactions = transactions +func (phm *PoolsHolderMock) MetaHeadersNonces() dataRetriever.Uint64SyncMapCacher { + return phm.metaHdrNonces } -func (phf *PoolsHolderFake) SetUnsignedTransactions(scrs dataRetriever.ShardedDataCacherNotifier) { - phf.unsignedtransactions = scrs +func (phm *PoolsHolderMock) SetTransactions(transactions dataRetriever.ShardedDataCacherNotifier) { + phm.transactions = transactions +} + +func (phm *PoolsHolderMock) SetUnsignedTransactions(scrs dataRetriever.ShardedDataCacherNotifier) { + phm.unsignedTransactions = scrs } // IsInterfaceNil returns true if there is no value under the interface -func (phf *PoolsHolderFake) IsInterfaceNil() bool { +func (phf *PoolsHolderMock) IsInterfaceNil() bool { if phf == nil { return true } diff --git a/process/mock/poolsHolderStub.go b/process/mock/poolsHolderStub.go index 43599982ea8..d189b57d055 100644 --- a/process/mock/poolsHolderStub.go +++ b/process/mock/poolsHolderStub.go @@ -11,6 +11,7 @@ type PoolsHolderStub struct { PeerChangesBlocksCalled func() storage.Cacher TransactionsCalled func() dataRetriever.ShardedDataCacherNotifier UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier + RewardTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier MiniBlocksCalled func() storage.Cacher MetaBlocksCalled func() storage.Cacher } @@ -43,6 +44,10 @@ func (phs *PoolsHolderStub) UnsignedTransactions() dataRetriever.ShardedDataCach return phs.UnsignedTransactionsCalled() } +func (phs *PoolsHolderStub) RewardTransactions() dataRetriever.ShardedDataCacherNotifier { + return phs.RewardTransactionsCalled() +} + // IsInterfaceNil returns true if there is no value under the interface func (phs *PoolsHolderStub) IsInterfaceNil() bool { if phs == nil { diff --git a/process/mock/poolscleanerMock.go b/process/mock/poolscleanerMock.go new file mode 100644 index 00000000000..c5b32a5e6c7 --- /dev/null +++ b/process/mock/poolscleanerMock.go @@ -0,0 +1,27 @@ +package mock + +import "time" + +type TxPoolsCleanerMock struct { + CleanCalled func(duration time.Duration) (bool, error) + NumRemovedTxsCalled func() uint64 +} + +// Clean will check if the pools contain transactions with a nonce 
lower than the transaction sender account nonce +// and will remove such transactions from the pools +func (tpc *TxPoolsCleanerMock) Clean(duration time.Duration) (bool, error) { + return false, nil +} + +// NumRemovedTxs will return the number of removed txs from pools +func (tpc *TxPoolsCleanerMock) NumRemovedTxs() uint64 { + return 0 +} + +// IsInterfaceNil returns true if there is no value under the interface +func (tpc *TxPoolsCleanerMock) IsInterfaceNil() bool { + if tpc == nil { + return true + } + return false +} diff --git a/process/mock/preprocessorMock.go b/process/mock/preprocessorMock.go index c4e069a0bf9..ab03b54b001 100644 --- a/process/mock/preprocessorMock.go +++ b/process/mock/preprocessorMock.go @@ -12,13 +12,14 @@ type PreProcessorMock struct { CreateBlockStartedCalled func() IsDataPreparedCalled func(requestedTxs int, haveTime func() time.Duration) error RemoveTxBlockFromPoolsCalled func(body block.Body, miniBlockPool storage.Cacher) error - RestoreTxBlockIntoPoolsCalled func(body block.Body, miniBlockPool storage.Cacher) (int, map[int][]byte, error) + RestoreTxBlockIntoPoolsCalled func(body block.Body, miniBlockPool storage.Cacher) (int, error) SaveTxBlockToStorageCalled func(body block.Body) error - ProcessBlockTransactionsCalled func(body block.Body, round uint64, haveTime func() time.Duration) error + ProcessBlockTransactionsCalled func(body block.Body, round uint64, haveTime func() bool) error RequestBlockTransactionsCalled func(body block.Body) int CreateMarshalizedDataCalled func(txHashes [][]byte) ([][]byte, error) RequestTransactionsForMiniBlockCalled func(mb block.MiniBlock) int ProcessMiniBlockCalled func(miniBlock *block.MiniBlock, haveTime func() bool, round uint64) error + CreateAndProcessMiniBlocksCalled func(maxTxSpaceRemained uint32, maxMbSpaceRemained uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, error) CreateAndProcessMiniBlockCalled func(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) GetAllCurrentUsedTxsCalled func() map[string]data.TransactionHandler } @@ -44,9 +45,9 @@ func (ppm *PreProcessorMock) RemoveTxBlockFromPools(body block.Body, miniBlockPo return ppm.RemoveTxBlockFromPoolsCalled(body, miniBlockPool) } -func (ppm *PreProcessorMock) RestoreTxBlockIntoPools(body block.Body, miniBlockPool storage.Cacher) (int, map[int][]byte, error) { +func (ppm *PreProcessorMock) RestoreTxBlockIntoPools(body block.Body, miniBlockPool storage.Cacher) (int, error) { if ppm.RestoreTxBlockIntoPoolsCalled == nil { - return 0, nil, nil + return 0, nil } return ppm.RestoreTxBlockIntoPoolsCalled(body, miniBlockPool) } @@ -58,7 +59,7 @@ func (ppm *PreProcessorMock) SaveTxBlockToStorage(body block.Body) error { return ppm.SaveTxBlockToStorageCalled(body) } -func (ppm *PreProcessorMock) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() time.Duration) error { +func (ppm *PreProcessorMock) ProcessBlockTransactions(body block.Body, round uint64, haveTime func() bool) error { if ppm.ProcessBlockTransactionsCalled == nil { return nil } @@ -93,6 +94,20 @@ func (ppm *PreProcessorMock) ProcessMiniBlock(miniBlock *block.MiniBlock, haveTi return ppm.ProcessMiniBlockCalled(miniBlock, haveTime, round) } +// CreateAndProcessMiniBlocks creates miniblocks from storage and processes the reward transactions added into the miniblocks +// as long as it has time +func (ppm *PreProcessorMock) CreateAndProcessMiniBlocks( + maxTxSpaceRemained uint32, + maxMbSpaceRemained uint32, + round 
uint64, + haveTime func() bool, +) (block.MiniBlockSlice, error) { + if ppm.CreateAndProcessMiniBlocksCalled == nil { + return nil, nil + } + return ppm.CreateAndProcessMiniBlocksCalled(maxTxSpaceRemained, maxMbSpaceRemained, round, haveTime) +} + func (ppm *PreProcessorMock) CreateAndProcessMiniBlock(sndShardId, dstShardId uint32, spaceRemained int, haveTime func() bool, round uint64) (*block.MiniBlock, error) { if ppm.CreateAndProcessMiniBlockCalled == nil { return nil, nil diff --git a/process/mock/requestHandlerMock.go b/process/mock/requestHandlerMock.go index 8f6f016ecd5..0ebe0e160d0 100644 --- a/process/mock/requestHandlerMock.go +++ b/process/mock/requestHandlerMock.go @@ -3,6 +3,7 @@ package mock type RequestHandlerMock struct { RequestTransactionHandlerCalled func(destShardID uint32, txHashes [][]byte) RequestScrHandlerCalled func(destShardID uint32, txHashes [][]byte) + RequestRewardTxHandlerCalled func(destShardID uint32, txHashes [][]byte) RequestMiniBlockHandlerCalled func(destShardID uint32, miniblockHash []byte) RequestHeaderHandlerCalled func(destShardID uint32, hash []byte) RequestHeaderHandlerByNonceCalled func(destShardID uint32, nonce uint64) @@ -22,6 +23,13 @@ func (rrh *RequestHandlerMock) RequestUnsignedTransactions(destShardID uint32, t rrh.RequestScrHandlerCalled(destShardID, txHashes) } +func (rrh *RequestHandlerMock) RequestRewardTransactions(destShardID uint32, txHashes [][]byte) { + if rrh.RequestRewardTxHandlerCalled == nil { + return + } + rrh.RequestRewardTxHandlerCalled(destShardID, txHashes) +} + func (rrh *RequestHandlerMock) RequestMiniBlock(shardId uint32, miniblockHash []byte) { if rrh.RequestMiniBlockHandlerCalled == nil { return diff --git a/process/mock/rewardTxProcessorMock.go b/process/mock/rewardTxProcessorMock.go new file mode 100644 index 00000000000..883879e6b56 --- /dev/null +++ b/process/mock/rewardTxProcessorMock.go @@ -0,0 +1,24 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data/rewardTx" +) + +type RewardTxProcessorMock struct { + ProcessRewardTransactionCalled func(rTx *rewardTx.RewardTx) error +} + +func (scrp *RewardTxProcessorMock) ProcessRewardTransaction(rTx *rewardTx.RewardTx) error { + if scrp.ProcessRewardTransactionCalled == nil { + return nil + } + + return scrp.ProcessRewardTransactionCalled(rTx) +} + +func (scrp *RewardTxProcessorMock) IsInterfaceNil() bool { + if scrp == nil { + return true + } + return false +} diff --git a/process/mock/rewardsHandlerMock.go b/process/mock/rewardsHandlerMock.go new file mode 100644 index 00000000000..d9a1a696c22 --- /dev/null +++ b/process/mock/rewardsHandlerMock.go @@ -0,0 +1,34 @@ +package mock + +import "math/big" + +type RewardsHandlerMock struct { + RewardsValueCalled func() *big.Int + CommunityPercentageCalled func() float64 + LeaderPercentageCalled func() float64 + BurnPercentageCalled func() float64 +} + +func (rhm *RewardsHandlerMock) RewardsValue() *big.Int { + return rhm.RewardsValueCalled() +} + +func (rhm *RewardsHandlerMock) CommunityPercentage() float64 { + return rhm.CommunityPercentageCalled() +} + +func (rhm *RewardsHandlerMock) LeaderPercentage() float64 { + return rhm.LeaderPercentageCalled() +} + +func (rhm *RewardsHandlerMock) BurnPercentage() float64 { + return rhm.BurnPercentageCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (rhm *RewardsHandlerMock) IsInterfaceNil() bool { + if rhm == nil { + return true + } + return false +} diff --git a/process/mock/specialAddressHandlerMock.go 
b/process/mock/specialAddressHandlerMock.go new file mode 100644 index 00000000000..661756611ef --- /dev/null +++ b/process/mock/specialAddressHandlerMock.go @@ -0,0 +1,148 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type SpecialAddressHandlerMock struct { + ElrondCommunityAddressCalled func() []byte + LeaderAddressCalled func() []byte + BurnAddressCalled func() []byte + ShardIdForAddressCalled func([]byte) (uint32, error) + AdrConv state.AddressConverter + ShardCoordinator sharding.Coordinator + NodesCoordinator sharding.NodesCoordinator + + shardConsensusData *data.ConsensusRewardData + metaConsensusData []*data.ConsensusRewardData +} + +func NewSpecialAddressHandlerMock( + addrConv state.AddressConverter, + shardCoordinator sharding.Coordinator, + nodesCoordinator sharding.NodesCoordinator, +) *SpecialAddressHandlerMock { + return &SpecialAddressHandlerMock{ + ElrondCommunityAddressCalled: nil, + LeaderAddressCalled: nil, + BurnAddressCalled: nil, + ShardIdForAddressCalled: nil, + AdrConv: addrConv, + ShardCoordinator: shardCoordinator, + NodesCoordinator: nodesCoordinator, + shardConsensusData: &data.ConsensusRewardData{ + Round: 0, + Epoch: 0, + Addresses: nil, + }, + metaConsensusData: make([]*data.ConsensusRewardData, 0), + } +} + +func (sh *SpecialAddressHandlerMock) SetElrondCommunityAddress(elrond []byte) { +} + +func (sh *SpecialAddressHandlerMock) SetShardConsensusData(randomness []byte, round uint64, epoch uint32, shardId uint32) error { + addresses, err := sh.NodesCoordinator.GetValidatorsRewardsAddresses(randomness, round, shardId) + if err != nil { + return err + } + + sh.shardConsensusData = &data.ConsensusRewardData{ + Round: round, + Epoch: epoch, + Addresses: addresses, + } + + return nil +} + +func (sh *SpecialAddressHandlerMock) ConsensusShardRewardData() *data.ConsensusRewardData { + return sh.shardConsensusData +} + +func (sh *SpecialAddressHandlerMock) SetMetaConsensusData(randomness []byte, round uint64, epoch uint32) error { + if sh.metaConsensusData == nil { + sh.metaConsensusData = make([]*data.ConsensusRewardData, 0) + } + + addresses, err := sh.NodesCoordinator.GetValidatorsRewardsAddresses(randomness, round, sharding.MetachainShardId) + if err != nil { + return err + } + + sh.metaConsensusData = append(sh.metaConsensusData, &data.ConsensusRewardData{ + Round: round, + Epoch: epoch, + Addresses: addresses, + }) + + return nil +} + +func (sh *SpecialAddressHandlerMock) ClearMetaConsensusData() { + sh.metaConsensusData = make([]*data.ConsensusRewardData, 0) +} + +func (sh *SpecialAddressHandlerMock) ConsensusMetaRewardData() []*data.ConsensusRewardData { + return sh.metaConsensusData +} + +func (sh *SpecialAddressHandlerMock) BurnAddress() []byte { + if sh.BurnAddressCalled == nil { + return []byte("burn") + } + + return sh.BurnAddressCalled() +} + +func (sh *SpecialAddressHandlerMock) ElrondCommunityAddress() []byte { + if sh.ElrondCommunityAddressCalled == nil { + return []byte("elrond") + } + + return sh.ElrondCommunityAddressCalled() +} + +func (sh *SpecialAddressHandlerMock) Round() uint64 { + if sh.shardConsensusData == nil { + return 0 + } + + return sh.shardConsensusData.Round +} + +func (sh *SpecialAddressHandlerMock) Epoch() uint32 { + if sh.shardConsensusData == nil { + return 0 + } + + return sh.shardConsensusData.Epoch +} + +func (sh *SpecialAddressHandlerMock) LeaderAddress() []byte { + if sh.LeaderAddressCalled == 
nil { + return []byte("leader") + } + + return sh.LeaderAddressCalled() +} + +func (sh *SpecialAddressHandlerMock) ShardIdForAddress(addr []byte) (uint32, error) { + convAdr, err := sh.AdrConv.CreateAddressFromPublicKeyBytes(addr) + if err != nil { + return 0, err + } + + return sh.ShardCoordinator.ComputeId(convAdr), nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sh *SpecialAddressHandlerMock) IsInterfaceNil() bool { + if sh == nil { + return true + } + return false +} diff --git a/process/mock/storerStub.go b/process/mock/storerStub.go index d189606d753..af7d1b3ee16 100644 --- a/process/mock/storerStub.go +++ b/process/mock/storerStub.go @@ -4,7 +4,6 @@ type StorerStub struct { PutCalled func(key, data []byte) error GetCalled func(key []byte) ([]byte, error) HasCalled func(key []byte) error - HasOrAddCalled func(key []byte, value []byte) error RemoveCalled func(key []byte) error ClearCacheCalled func() DestroyUnitCalled func() error @@ -22,10 +21,6 @@ func (ss *StorerStub) Has(key []byte) error { return ss.HasCalled(key) } -func (ss *StorerStub) HasOrAdd(key []byte, value []byte) error { - return ss.HasOrAddCalled(key, value) -} - func (ss *StorerStub) Remove(key []byte) error { return ss.RemoveCalled(key) } diff --git a/process/mock/transactionCoordinatorMock.go b/process/mock/transactionCoordinatorMock.go index 7437c451aad..a1b71e7ff8e 100644 --- a/process/mock/transactionCoordinatorMock.go +++ b/process/mock/transactionCoordinatorMock.go @@ -14,11 +14,11 @@ type TransactionCoordinatorMock struct { RequestBlockTransactionsCalled func(body block.Body) IsDataPreparedForProcessingCalled func(haveTime func() time.Duration) error SaveBlockDataToStorageCalled func(body block.Body) error - RestoreBlockDataFromStorageCalled func(body block.Body) (int, map[int][][]byte, error) + RestoreBlockDataFromStorageCalled func(body block.Body) (int, error) RemoveBlockDataFromPoolCalled func(body block.Body) error ProcessBlockTransactionCalled func(body block.Body, round uint64, haveTime func() time.Duration) error CreateBlockStartedCalled func() - CreateMbsAndProcessCrossShardTransactionsDstMeCalled func(header data.HeaderHandler, maxTxRemaining uint32, maxMbRemaining uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, uint32, bool) + CreateMbsAndProcessCrossShardTransactionsDstMeCalled func(header data.HeaderHandler, processedMiniBlocksHashes map[string]struct{}, maxTxRemaining uint32, maxMbRemaining uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, uint32, bool) CreateMbsAndProcessTransactionsFromMeCalled func(maxTxRemaining uint32, maxMbRemaining uint32, round uint64, haveTime func() bool) block.MiniBlockSlice CreateMarshalizedDataCalled func(body block.Body) (map[uint32]block.MiniBlockSlice, map[string][][]byte) GetAllCurrentUsedTxsCalled func(blockType block.Type) map[string]data.TransactionHandler @@ -65,9 +65,9 @@ func (tcm *TransactionCoordinatorMock) SaveBlockDataToStorage(body block.Body) e return tcm.SaveBlockDataToStorageCalled(body) } -func (tcm *TransactionCoordinatorMock) RestoreBlockDataFromStorage(body block.Body) (int, map[int][][]byte, error) { +func (tcm *TransactionCoordinatorMock) RestoreBlockDataFromStorage(body block.Body) (int, error) { if tcm.RestoreBlockDataFromStorageCalled == nil { - return 0, nil, nil + return 0, nil } return tcm.RestoreBlockDataFromStorageCalled(body) @@ -97,12 +97,12 @@ func (tcm *TransactionCoordinatorMock) CreateBlockStarted() { tcm.CreateBlockStartedCalled() } -func (tcm 
*TransactionCoordinatorMock) CreateMbsAndProcessCrossShardTransactionsDstMe(header data.HeaderHandler, maxTxRemaining uint32, maxMbRemaining uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, uint32, bool) { +func (tcm *TransactionCoordinatorMock) CreateMbsAndProcessCrossShardTransactionsDstMe(header data.HeaderHandler, processedMiniBlocksHashes map[string]struct{}, maxTxRemaining uint32, maxMbRemaining uint32, round uint64, haveTime func() bool) (block.MiniBlockSlice, uint32, bool) { if tcm.CreateMbsAndProcessCrossShardTransactionsDstMeCalled == nil { return nil, 0, false } - return tcm.CreateMbsAndProcessCrossShardTransactionsDstMeCalled(header, maxTxRemaining, maxMbRemaining, round, haveTime) + return tcm.CreateMbsAndProcessCrossShardTransactionsDstMeCalled(header, processedMiniBlocksHashes, maxTxRemaining, maxMbRemaining, round, haveTime) } func (tcm *TransactionCoordinatorMock) CreateMbsAndProcessTransactionsFromMe(maxTxRemaining uint32, maxMbRemaining uint32, round uint64, haveTime func() bool) block.MiniBlockSlice { diff --git a/process/mock/txTypeHandlerMock.go b/process/mock/txTypeHandlerMock.go new file mode 100644 index 00000000000..76cedc35360 --- /dev/null +++ b/process/mock/txTypeHandlerMock.go @@ -0,0 +1,26 @@ +package mock + +import ( + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/process" +) + +type TxTypeHandlerMock struct { + ComputeTransactionTypeCalled func(tx data.TransactionHandler) (process.TransactionType, error) +} + +func (th *TxTypeHandlerMock) ComputeTransactionType(tx data.TransactionHandler) (process.TransactionType, error) { + if th.ComputeTransactionTypeCalled == nil { + return process.MoveBalance, nil + } + + return th.ComputeTransactionTypeCalled(tx) +} + +// IsInterfaceNil returns true if there is no value under the interface +func (th *TxTypeHandlerMock) IsInterfaceNil() bool { + if th == nil { + return true + } + return false +} diff --git a/process/mock/unsignedTxHandlerMock.go b/process/mock/unsignedTxHandlerMock.go new file mode 100644 index 00000000000..7e7175bdbff --- /dev/null +++ b/process/mock/unsignedTxHandlerMock.go @@ -0,0 +1,61 @@ +package mock + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/data" +) + +type UnsignedTxHandlerMock struct { + CleanProcessedUtxsCalled func() + ProcessTransactionFeeCalled func(cost *big.Int) + CreateAllUTxsCalled func() []data.TransactionHandler + VerifyCreatedUTxsCalled func() error + AddTxFeeFromBlockCalled func(tx data.TransactionHandler) +} + +func (ut *UnsignedTxHandlerMock) AddRewardTxFromBlock(tx data.TransactionHandler) { + if ut.AddTxFeeFromBlockCalled == nil { + return + } + + ut.AddTxFeeFromBlockCalled(tx) +} + +func (ut *UnsignedTxHandlerMock) CleanProcessedUTxs() { + if ut.CleanProcessedUtxsCalled == nil { + return + } + + ut.CleanProcessedUtxsCalled() +} + +func (ut *UnsignedTxHandlerMock) ProcessTransactionFee(cost *big.Int) { + if ut.ProcessTransactionFeeCalled == nil { + return + } + + ut.ProcessTransactionFeeCalled(cost) +} + +func (ut *UnsignedTxHandlerMock) CreateAllUTxs() []data.TransactionHandler { + if ut.CreateAllUTxsCalled == nil { + return nil + } + return ut.CreateAllUTxsCalled() +} + +func (ut *UnsignedTxHandlerMock) VerifyCreatedUTxs() error { + if ut.VerifyCreatedUTxsCalled == nil { + return nil + } + return ut.VerifyCreatedUTxsCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ut *UnsignedTxHandlerMock) IsInterfaceNil() bool { + if ut == nil { + return true + } + return 
false +} diff --git a/process/rewardTransaction/export_test.go b/process/rewardTransaction/export_test.go new file mode 100644 index 00000000000..301fd02f5f8 --- /dev/null +++ b/process/rewardTransaction/export_test.go @@ -0,0 +1,21 @@ +package rewardTransaction + +import ( + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" +) + +// Hasher will return the hasher of InterceptedRewardTransaction for using in test files +func (irt *InterceptedRewardTransaction) Hasher() hashing.Hasher { + return irt.hasher +} + +// Marshalizer will return the marshalizer of RewardTxInterceptor for using in test files +func (rti *RewardTxInterceptor) Marshalizer() marshal.Marshalizer { + return rti.marshalizer +} + +// BroadcastCallbackHandler will call the broadcast callback handler of RewardTxInterceptor for using in test files +func (rti *RewardTxInterceptor) BroadcastCallbackHandler(buffToSend []byte) { + rti.broadcastCallbackHandler(buffToSend) +} diff --git a/process/rewardTransaction/interceptedRewardTransaction.go b/process/rewardTransaction/interceptedRewardTransaction.go new file mode 100644 index 00000000000..eb4cc1157df --- /dev/null +++ b/process/rewardTransaction/interceptedRewardTransaction.go @@ -0,0 +1,149 @@ +package rewardTransaction + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +// InterceptedRewardTransaction holds and manages a transaction based struct with extended functionality +type InterceptedRewardTransaction struct { + rTx *rewardTx.RewardTx + marshalizer marshal.Marshalizer + hasher hashing.Hasher + addrConv state.AddressConverter + coordinator sharding.Coordinator + hash []byte + rcvShard uint32 + sndShard uint32 + isAddressedToOtherShards bool +} + +// NewInterceptedRewardTransaction returns a new instance of InterceptedRewardTransaction +func NewInterceptedRewardTransaction( + rewardTxBuff []byte, + marshalizer marshal.Marshalizer, + hasher hashing.Hasher, + addrConv state.AddressConverter, + coordinator sharding.Coordinator, +) (*InterceptedRewardTransaction, error) { + + if rewardTxBuff == nil { + return nil, process.ErrNilBuffer + } + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if addrConv == nil || addrConv.IsInterfaceNil() { + return nil, process.ErrNilAddressConverter + } + if coordinator == nil || coordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + + rTx := &rewardTx.RewardTx{} + err := marshalizer.Unmarshal(rTx, rewardTxBuff) + if err != nil { + return nil, err + } + + inRewardTx := &InterceptedRewardTransaction{ + rTx: rTx, + marshalizer: marshalizer, + hasher: hasher, + addrConv: addrConv, + coordinator: coordinator, + } + + err = inRewardTx.processFields(rewardTxBuff) + if err != nil { + return nil, err + } + + err = inRewardTx.integrity() + if err != nil { + return nil, err + } + + err = inRewardTx.verifyIfNotarized(inRewardTx.hash) + if err != nil { + return nil, err + } + + return inRewardTx, nil +} + +func (inRTx *InterceptedRewardTransaction) processFields(rewardTxBuff []byte) error { + inRTx.hash = 
inRTx.hasher.Compute(string(rewardTxBuff)) + + rcvAddr, err := inRTx.addrConv.CreateAddressFromPublicKeyBytes(inRTx.rTx.RcvAddr) + if err != nil { + return process.ErrInvalidRcvAddr + } + + inRTx.rcvShard = inRTx.coordinator.ComputeId(rcvAddr) + inRTx.sndShard = inRTx.rTx.ShardId + + inRTx.isAddressedToOtherShards = inRTx.rcvShard != inRTx.coordinator.SelfId() && + inRTx.sndShard != inRTx.coordinator.SelfId() + + return nil +} + +// integrity checks for not nil fields and negative value +func (inRTx *InterceptedRewardTransaction) integrity() error { + if len(inRTx.rTx.RcvAddr) == 0 { + return process.ErrNilRcvAddr + } + + if inRTx.rTx.Value == nil { + return process.ErrNilValue + } + + if inRTx.rTx.Value.Cmp(big.NewInt(0)) < 0 { + return process.ErrNegativeValue + } + + return nil +} + +// verifyIfNotarized checks if the rewardTx was already notarized +func (inRTx *InterceptedRewardTransaction) verifyIfNotarized(rTxBuff []byte) error { + // TODO: implement this for flood protection purposes + // could verify if the epoch/round is behind last committed metachain block + return nil +} + +// RcvShard returns the receiver shard +func (inRTx *InterceptedRewardTransaction) RcvShard() uint32 { + return inRTx.rcvShard +} + +// SndShard returns the sender shard +func (inRTx *InterceptedRewardTransaction) SndShard() uint32 { + return inRTx.sndShard +} + +// IsAddressedToOtherShards returns true if this transaction is not meant to be processed by the node from this shard +func (inRTx *InterceptedRewardTransaction) IsAddressedToOtherShards() bool { + return inRTx.isAddressedToOtherShards +} + +// RewardTransaction returns the reward transaction pointer that actually holds the data +func (inRTx *InterceptedRewardTransaction) RewardTransaction() data.TransactionHandler { + return inRTx.rTx +} + +// Hash gets the hash of this transaction +func (inRTx *InterceptedRewardTransaction) Hash() []byte { + return inRTx.hash +} diff --git a/process/rewardTransaction/interceptedRewardTransaction_test.go b/process/rewardTransaction/interceptedRewardTransaction_test.go new file mode 100644 index 00000000000..ea1858af2c5 --- /dev/null +++ b/process/rewardTransaction/interceptedRewardTransaction_test.go @@ -0,0 +1,149 @@ +package rewardTransaction_test + +import ( + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" + "github.com/stretchr/testify/assert" +) + +func TestNewInterceptedRewardTransaction_NilTxBuffShouldErr(t *testing.T) { + t.Parallel() + + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + nil, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, irt) + assert.Equal(t, process.ErrNilBuffer, err) +} + +func TestNewInterceptedRewardTransaction_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + txBuff := []byte("tx") + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + txBuff, + nil, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, irt) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewInterceptedRewardTransaction_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + txBuff := []byte("tx") + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + txBuff, + 
&mock.MarshalizerMock{}, + nil, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, irt) + assert.Equal(t, process.ErrNilHasher, err) +} + +func TestNewInterceptedRewardTransaction_NilAddressConverterShouldErr(t *testing.T) { + t.Parallel() + + txBuff := []byte("tx") + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + txBuff, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + nil, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, irt) + assert.Equal(t, process.ErrNilAddressConverter, err) +} + +func TestNewInterceptedRewardTransaction_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + txBuff := []byte("tx") + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + txBuff, + &mock.MarshalizerMock{}, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + nil) + + assert.Nil(t, irt) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewInterceptedRewardTransaction_OkValsShouldWork(t *testing.T) { + t.Parallel() + + rewTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("receiver"), + ShardId: 0, + } + + marshalizer := &mock.MarshalizerMock{} + txBuff, _ := marshalizer.Marshal(rewTx) + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + txBuff, + marshalizer, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.NotNil(t, irt) + assert.Nil(t, err) +} + +func TestNewInterceptedRewardTransaction_TestGetters(t *testing.T) { + t.Parallel() + + shardId := uint32(0) + rewTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("receiver"), + ShardId: shardId, + } + + marshalizer := &mock.MarshalizerMock{} + shardCoord := mock.NewMultiShardsCoordinatorMock(3) + shardCoord.ComputeIdCalled = func(address state.AddressContainer) uint32 { + return shardId + } + + txBuff, _ := marshalizer.Marshal(rewTx) + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + txBuff, + marshalizer, + &mock.HasherMock{}, + &mock.AddressConverterMock{}, + shardCoord) + + assert.NotNil(t, irt) + assert.Nil(t, err) + + assert.Equal(t, shardId, irt.RcvShard()) + assert.Equal(t, shardId, irt.SndShard()) + assert.Equal(t, &rewTx, irt.RewardTransaction()) + assert.False(t, irt.IsAddressedToOtherShards()) + + txHash := irt.Hasher().Compute(string(txBuff)) + assert.Equal(t, txHash, irt.Hash()) +} diff --git a/process/rewardTransaction/interceptor.go b/process/rewardTransaction/interceptor.go new file mode 100644 index 00000000000..2707c4ca34d --- /dev/null +++ b/process/rewardTransaction/interceptor.go @@ -0,0 +1,151 @@ +package rewardTransaction + +import ( + "github.com/ElrondNetwork/elrond-go/core/logger" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/hashing" + "github.com/ElrondNetwork/elrond-go/marshal" + "github.com/ElrondNetwork/elrond-go/p2p" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/storage" +) + +var log = logger.DefaultLogger() + +// RewardTxInterceptor is used for intercepting reward transactions and storing them into a datapool +type RewardTxInterceptor struct { + marshalizer marshal.Marshalizer + rewardTxPool dataRetriever.ShardedDataCacherNotifier + rewardTxStorer storage.Storer + addrConverter state.AddressConverter + hasher hashing.Hasher + shardCoordinator 
sharding.Coordinator + broadcastCallbackHandler func(buffToSend []byte) +} + +// NewRewardTxInterceptor hooks a new interceptor for reward transactions +func NewRewardTxInterceptor( + marshalizer marshal.Marshalizer, + rewardTxPool dataRetriever.ShardedDataCacherNotifier, + rewardTxStorer storage.Storer, + addrConverter state.AddressConverter, + hasher hashing.Hasher, + shardCoordinator sharding.Coordinator, +) (*RewardTxInterceptor, error) { + + if marshalizer == nil || marshalizer.IsInterfaceNil() { + return nil, process.ErrNilMarshalizer + } + if rewardTxPool == nil || rewardTxPool.IsInterfaceNil() { + return nil, process.ErrNilRewardTxDataPool + } + if rewardTxStorer == nil || rewardTxStorer.IsInterfaceNil() { + return nil, process.ErrNilRewardsTxStorage + } + if addrConverter == nil || addrConverter.IsInterfaceNil() { + return nil, process.ErrNilAddressConverter + } + if hasher == nil || hasher.IsInterfaceNil() { + return nil, process.ErrNilHasher + } + if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { + return nil, process.ErrNilShardCoordinator + } + + rewardTxIntercept := &RewardTxInterceptor{ + marshalizer: marshalizer, + rewardTxPool: rewardTxPool, + rewardTxStorer: rewardTxStorer, + hasher: hasher, + addrConverter: addrConverter, + shardCoordinator: shardCoordinator, + } + + return rewardTxIntercept, nil +} + +// ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received +// (for the topic this validator was registered to) +func (rti *RewardTxInterceptor) ProcessReceivedMessage(message p2p.MessageP2P) error { + if message == nil || message.IsInterfaceNil() { + return process.ErrNilMessage + } + + if message.Data() == nil { + return process.ErrNilDataToProcess + } + + rewardTxsBuff := make([][]byte, 0) + err := rti.marshalizer.Unmarshal(&rewardTxsBuff, message.Data()) + if err != nil { + return err + } + if len(rewardTxsBuff) == 0 { + return process.ErrNoRewardTransactionInMessage + } + + filteredRTxBuffs := make([][]byte, 0) + lastErrEncountered := error(nil) + for _, rewardTxBuff := range rewardTxsBuff { + rewardTxIntercepted, err := NewInterceptedRewardTransaction( + rewardTxBuff, + rti.marshalizer, + rti.hasher, + rti.addrConverter, + rti.shardCoordinator) + + if err != nil { + lastErrEncountered = err + continue + } + + //reward tx is validated, add it to filtered out reward txs + filteredRTxBuffs = append(filteredRTxBuffs, rewardTxBuff) + if rewardTxIntercepted.IsAddressedToOtherShards() { + log.Debug("intercepted reward transaction is for other shards") + + continue + } + + go rti.processRewardTransaction(rewardTxIntercepted) + } + + var buffToSend []byte + filteredOutRTxsNeedToBeSend := len(filteredRTxBuffs) > 0 && lastErrEncountered != nil + if filteredOutRTxsNeedToBeSend { + buffToSend, err = rti.marshalizer.Marshal(filteredRTxBuffs) + if err != nil { + return err + } + } + + if rti.broadcastCallbackHandler != nil { + rti.broadcastCallbackHandler(buffToSend) + } + + return lastErrEncountered +} + +// SetBroadcastCallback sets the callback method to send filtered out message +func (rti *RewardTxInterceptor) SetBroadcastCallback(callback func(buffToSend []byte)) { + rti.broadcastCallbackHandler = callback +} + +func (rti *RewardTxInterceptor) processRewardTransaction(rTx *InterceptedRewardTransaction) { + cacherIdentifier := process.ShardCacherIdentifier(rTx.SndShard(), rTx.RcvShard()) + rti.rewardTxPool.AddData( + rTx.Hash(), + rTx.RewardTransaction(), + cacherIdentifier, + ) +} + +// 
IsInterfaceNil returns true if there is no value under the interface +func (rti *RewardTxInterceptor) IsInterfaceNil() bool { + if rti == nil { + return true + } + return false +} diff --git a/process/rewardTransaction/interceptor_test.go b/process/rewardTransaction/interceptor_test.go new file mode 100644 index 00000000000..5374d7cc68c --- /dev/null +++ b/process/rewardTransaction/interceptor_test.go @@ -0,0 +1,280 @@ +package rewardTransaction_test + +import ( + "encoding/json" + "math/big" + "sync/atomic" + "testing" + "time" + + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" + "github.com/stretchr/testify/assert" +) + +func TestNewRewardTxInterceptor_NilMarshalizerShouldErr(t *testing.T) { + t.Parallel() + + rti, err := rewardTransaction.NewRewardTxInterceptor( + nil, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, rti) + assert.Equal(t, process.ErrNilMarshalizer, err) +} + +func TestNewRewardTxInterceptor_NilRewardTxPoolShouldErr(t *testing.T) { + t.Parallel() + + rti, err := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + nil, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, rti) + assert.Equal(t, process.ErrNilRewardTxDataPool, err) +} + +func TestNewRewardTxInterceptor_NilRewardTxStorerShouldErr(t *testing.T) { + t.Parallel() + + rti, err := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + nil, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, rti) + assert.Equal(t, process.ErrNilRewardsTxStorage, err) +} + +func TestNewRewardTxInterceptor_NilAddressConverterShouldErr(t *testing.T) { + t.Parallel() + + rti, err := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + nil, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, rti) + assert.Equal(t, process.ErrNilAddressConverter, err) +} + +func TestNewRewardTxInterceptor_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + rti, err := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + nil, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.Nil(t, rti) + assert.Equal(t, process.ErrNilHasher, err) +} + +func TestNewRewardTxInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + rti, err := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + nil) + + assert.Nil(t, rti) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewRewardTxInterceptor_OkValsShouldWork(t *testing.T) { + t.Parallel() + + rti, err := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + assert.NotNil(t, rti) + assert.Nil(t, err) + assert.False(t, rti.IsInterfaceNil()) +} + +func 
TestRewardTxInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { + t.Parallel() + + rti, _ := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + err := rti.ProcessReceivedMessage(nil) + assert.Equal(t, process.ErrNilMessage, err) +} + +func TestRewardTxInterceptor_ProcessReceivedMessageNilDataShouldErr(t *testing.T) { + t.Parallel() + + rti, _ := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + message := &mock.P2PMessageMock{ + DataField: nil, + } + + err := rti.ProcessReceivedMessage(message) + assert.Equal(t, process.ErrNilDataToProcess, err) +} + +func TestRewardTxInterceptor_ProcessReceivedMessageIntraShardShouldWork(t *testing.T) { + t.Parallel() + + wasCalled := int32(0) + rti, _ := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{ + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + atomic.StoreInt32(&wasCalled, 1) + }, + }, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + rewardTx1 := rewardTx.RewardTx{ + Round: 1, + Epoch: 0, + Value: new(big.Int).SetInt64(157), + RcvAddr: []byte("rcvr1"), + ShardId: 0, + } + rewardTxBytes1, _ := rti.Marshalizer().Marshal(rewardTx1) + + rewardTx2 := rewardTx.RewardTx{ + Round: 0, + Epoch: 1, + Value: new(big.Int).SetInt64(157), + RcvAddr: []byte("rcvr2"), + ShardId: 0, + } + rewardTxBytes2, _ := rti.Marshalizer().Marshal(rewardTx2) + + var rewardTxsSlice [][]byte + rewardTxsSlice = append(rewardTxsSlice, rewardTxBytes1, rewardTxBytes2) + rewardTxsBuff, _ := json.Marshal(rewardTxsSlice) + + message := &mock.P2PMessageMock{ + DataField: rewardTxsBuff, + } + + err := rti.ProcessReceivedMessage(message) + time.Sleep(20 * time.Millisecond) + + assert.Nil(t, err) + assert.Equal(t, int32(1), atomic.LoadInt32(&wasCalled)) +} + +func TestRewardTxInterceptor_ProcessReceivedMessageCrossShardShouldNotAdd(t *testing.T) { + t.Parallel() + + wasCalled := int32(0) + shardCoord := mock.NewMultiShardsCoordinatorMock(3) + shardCoord.ComputeIdCalled = func(address state.AddressContainer) uint32 { + return uint32(1) + } + rti, _ := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{ + AddDataCalled: func(key []byte, data interface{}, cacheId string) { + atomic.StoreInt32(&wasCalled, 1) + }, + }, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + shardCoord) + + rewardTx1 := rewardTx.RewardTx{ + Round: 1, + Epoch: 0, + Value: new(big.Int).SetInt64(157), + RcvAddr: []byte("rcvr1"), + ShardId: 1, + } + rewardTxBytes1, _ := rti.Marshalizer().Marshal(rewardTx1) + + rewardTx2 := rewardTx.RewardTx{ + Round: 0, + Epoch: 1, + Value: new(big.Int).SetInt64(157), + RcvAddr: []byte("rcvr2"), + ShardId: 1, + } + rewardTxBytes2, _ := rti.Marshalizer().Marshal(rewardTx2) + + var rewardTxsSlice [][]byte + rewardTxsSlice = append(rewardTxsSlice, rewardTxBytes1, rewardTxBytes2) + rewardTxsBuff, _ := json.Marshal(rewardTxsSlice) + + message := &mock.P2PMessageMock{ + DataField: rewardTxsBuff, + } + + err := rti.ProcessReceivedMessage(message) + time.Sleep(20 * time.Millisecond) + assert.Nil(t, err) + // check that AddData was not called, as 
tx is cross shard + assert.Equal(t, int32(0), atomic.LoadInt32(&wasCalled)) +} + +func TestRewardTxInterceptor_SetBroadcastCallback(t *testing.T) { + t.Parallel() + + rti, _ := rewardTransaction.NewRewardTxInterceptor( + &mock.MarshalizerMock{}, + &mock.ShardedDataStub{}, + &mock.StorerStub{}, + &mock.AddressConverterMock{}, + &mock.HasherMock{}, + mock.NewMultiShardsCoordinatorMock(3)) + + bytesToSend := []byte("test") + var bytesToReceive []byte + rti.SetBroadcastCallback(func(buffToSend []byte) { + bytesToReceive = buffToSend + return + }) + + rti.BroadcastCallbackHandler(bytesToSend) + assert.Equal(t, bytesToSend, bytesToReceive) +} diff --git a/process/rewardTransaction/process.go b/process/rewardTransaction/process.go new file mode 100644 index 00000000000..9f0002826ff --- /dev/null +++ b/process/rewardTransaction/process.go @@ -0,0 +1,115 @@ +package rewardTransaction + +import ( + "math/big" + "sync" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type rewardTxProcessor struct { + accounts state.AccountsAdapter + adrConv state.AddressConverter + shardCoordinator sharding.Coordinator + + mutRewardsForwarder sync.Mutex + rewardTxForwarder process.IntermediateTransactionHandler +} + +// NewRewardTxProcessor creates a rewardTxProcessor instance +func NewRewardTxProcessor( + accountsDB state.AccountsAdapter, + adrConv state.AddressConverter, + coordinator sharding.Coordinator, + rewardTxForwarder process.IntermediateTransactionHandler, +) (*rewardTxProcessor, error) { + if accountsDB == nil { + return nil, process.ErrNilAccountsAdapter + } + if adrConv == nil { + return nil, process.ErrNilAddressConverter + } + if coordinator == nil { + return nil, process.ErrNilShardCoordinator + } + if rewardTxForwarder == nil { + return nil, process.ErrNilIntermediateTransactionHandler + } + + return &rewardTxProcessor{ + accounts: accountsDB, + adrConv: adrConv, + shardCoordinator: coordinator, + rewardTxForwarder: rewardTxForwarder, + }, nil +} + +func (rtp *rewardTxProcessor) getAccountFromAddress(address []byte) (state.AccountHandler, error) { + addr, err := rtp.adrConv.CreateAddressFromPublicKeyBytes(address) + if err != nil { + return nil, err + } + + shardForCurrentNode := rtp.shardCoordinator.SelfId() + shardForAddr := rtp.shardCoordinator.ComputeId(addr) + if shardForCurrentNode != shardForAddr { + return nil, nil + } + + acnt, err := rtp.accounts.GetAccountWithJournal(addr) + if err != nil { + return nil, err + } + + return acnt, nil +} + +// ProcessRewardTransaction updates the account state from the reward transaction +func (rtp *rewardTxProcessor) ProcessRewardTransaction(rTx *rewardTx.RewardTx) error { + if rTx == nil { + return process.ErrNilRewardTransaction + } + if rTx.Value == nil { + return process.ErrNilValueFromRewardTransaction + } + + rtp.mutRewardsForwarder.Lock() + err := rtp.rewardTxForwarder.AddIntermediateTransactions([]data.TransactionHandler{rTx}) + rtp.mutRewardsForwarder.Unlock() + if err != nil { + return err + } + + accHandler, err := rtp.getAccountFromAddress(rTx.RcvAddr) + if err != nil { + return err + } + + if accHandler == nil || accHandler.IsInterfaceNil() { + // address from different shard + return nil + } + + rewardAcc, ok := accHandler.(*state.Account) + if !ok { + return process.ErrWrongTypeAssertion + } + + operation := big.NewInt(0) + operation = 
operation.Add(rTx.Value, rewardAcc.Balance) + err = rewardAcc.SetBalanceWithJournal(operation) + + return err +} + +// IsInterfaceNil returns true if there is no value under the interface +func (rtp *rewardTxProcessor) IsInterfaceNil() bool { + if rtp == nil { + return true + } + return false +} diff --git a/process/rewardTransaction/process_test.go b/process/rewardTransaction/process_test.go new file mode 100644 index 00000000000..800940e1431 --- /dev/null +++ b/process/rewardTransaction/process_test.go @@ -0,0 +1,287 @@ +package rewardTransaction_test + +import ( + "errors" + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/data" + "github.com/ElrondNetwork/elrond-go/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/data/state" + "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/mock" + "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" + "github.com/stretchr/testify/assert" +) + +func TestNewRewardTxProcessor_NilAccountsDbShouldErr(t *testing.T) { + t.Parallel() + + rtp, err := rewardTransaction.NewRewardTxProcessor( + nil, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilAccountsAdapter, err) +} + +func TestNewRewardTxProcessor_NilAddressConverterShouldErr(t *testing.T) { + t.Parallel() + + rtp, err := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{}, + nil, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilAddressConverter, err) +} + +func TestNewRewardTxProcessor_NilShardCoordinatorShouldErr(t *testing.T) { + t.Parallel() + + rtp, err := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + nil, + &mock.IntermediateTransactionHandlerMock{}) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilShardCoordinator, err) +} + +func TestNewRewardTxProcessor_NilRewardTxForwarderShouldErr(t *testing.T) { + t.Parallel() + + rtp, err := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + nil) + + assert.Nil(t, rtp) + assert.Equal(t, process.ErrNilIntermediateTransactionHandler, err) +} + +func TestNewRewardTxProcessor_OkValsShouldWork(t *testing.T) { + t.Parallel() + + rtp, err := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + assert.NotNil(t, rtp) + assert.Nil(t, err) + assert.False(t, rtp.IsInterfaceNil()) +} + +func TestRewardTxProcessor_ProcessRewardTransactionNilTxShouldErr(t *testing.T) { + t.Parallel() + + rtp, _ := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + err := rtp.ProcessRewardTransaction(nil) + assert.Equal(t, process.ErrNilRewardTransaction, err) +} + +func TestRewardTxProcessor_ProcessRewardTransactionNilTxValueShouldErr(t *testing.T) { + t.Parallel() + + rtp, _ := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + rwdTx := rewardTx.RewardTx{Value: nil} + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Equal(t, 
process.ErrNilValueFromRewardTransaction, err) +} + +func TestRewardTxProcessor_ProcessRewardTransactionCannotCreateAddressShouldErr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("cannot create address") + rtp, _ := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterStub{ + CreateAddressFromPublicKeyBytesCalled: func(pubKey []byte) (state.AddressContainer, error) { + return nil, expectedErr + }, + }, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("rcvr"), + ShardId: 0, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Equal(t, expectedErr, err) +} + +func TestRewardTxProcessor_ProcessRewardTransactionAddressNotInNodesShardShouldNotExecute(t *testing.T) { + t.Parallel() + + getAccountWithJournalWasCalled := false + shardCoord := mock.NewMultiShardsCoordinatorMock(3) + shardCoord.ComputeIdCalled = func(address state.AddressContainer) uint32 { + return uint32(5) + } + rtp, _ := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (state.AccountHandler, error) { + getAccountWithJournalWasCalled = true + return nil, nil + }, + }, + &mock.AddressConverterMock{}, + shardCoord, + &mock.IntermediateTransactionHandlerMock{}) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("rcvr"), + ShardId: 0, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Nil(t, err) + // account should not be requested as the address is not in node's shard + assert.False(t, getAccountWithJournalWasCalled) +} + +func TestRewardTxProcessor_ProcessRewardTransactionCannotGetAccountShouldErr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("cannot get account") + rtp, _ := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (state.AccountHandler, error) { + return nil, expectedErr + }, + }, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("rcvr"), + ShardId: 0, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Equal(t, expectedErr, err) +} + +func TestRewardTxProcessor_ProcessRewardTransactionCannotAddIntermediateTxsShouldErr(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("cannot add intermediate transactions") + rtp, _ := rewardTransaction.NewRewardTxProcessor( + &mock.AccountsStub{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{ + AddIntermediateTransactionsCalled: func(txs []data.TransactionHandler) error { + return expectedErr + }, + }) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("rcvr"), + ShardId: 0, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Equal(t, expectedErr, err) +} + +func TestRewardTxProcessor_ProcessRewardTransactionWrongTypeAssertionAccountHolderShouldErr(t *testing.T) { + t.Parallel() + + accountsDb := &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (state.AccountHandler, error) { + return mock.NewAccountWrapMock(addressContainer, 
&mock.AccountTrackerStub{}), nil + }, + } + + rtp, _ := rewardTransaction.NewRewardTxProcessor( + accountsDb, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("rcvr"), + ShardId: 0, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Equal(t, process.ErrWrongTypeAssertion, err) +} + +func TestRewardTxProcessor_ProcessRewardTransactionShouldWork(t *testing.T) { + t.Parallel() + + journalizeWasCalled := false + saveAccountWasCalled := false + + accountsDb := &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (state.AccountHandler, error) { + ats := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeWasCalled = true + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountWasCalled = true + return nil + }, + } + return state.NewAccount(addressContainer, ats) + }, + } + + rtp, _ := rewardTransaction.NewRewardTxProcessor( + accountsDb, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &mock.IntermediateTransactionHandlerMock{}) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: new(big.Int).SetInt64(100), + RcvAddr: []byte("rcvr"), + ShardId: 0, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Nil(t, err) + assert.True(t, journalizeWasCalled) + assert.True(t, saveAccountWasCalled) +} diff --git a/process/smartContract/export_test.go b/process/smartContract/export_test.go index 5ea1d1bcad3..d22eaef1f23 100644 --- a/process/smartContract/export_test.go +++ b/process/smartContract/export_test.go @@ -22,11 +22,21 @@ func (sc *scProcessor) CreateVMInput(tx *transaction.Transaction) (*vmcommon.VMI return sc.createVMInput(tx) } -func (sc *scProcessor) ProcessVMOutput(vmOutput *vmcommon.VMOutput, tx *transaction.Transaction, acntSnd state.AccountHandler, round uint64) ([]data.TransactionHandler, error) { +func (sc *scProcessor) ProcessVMOutput( + vmOutput *vmcommon.VMOutput, + tx *transaction.Transaction, + acntSnd state.AccountHandler, + round uint64, +) ([]data.TransactionHandler, *big.Int, error) { return sc.processVMOutput(vmOutput, tx, acntSnd, round) } -func (sc *scProcessor) RefundGasToSender(gasRefund *big.Int, tx *transaction.Transaction, txHash []byte, acntSnd state.AccountHandler) (*smartContractResult.SmartContractResult, error) { +func (sc *scProcessor) RefundGasToSender( + gasRefund *big.Int, + tx *transaction.Transaction, + txHash []byte, + acntSnd state.AccountHandler, +) (*smartContractResult.SmartContractResult, *big.Int, error) { return sc.refundGasToSender(gasRefund, tx, txHash, acntSnd) } diff --git a/process/smartContract/process.go b/process/smartContract/process.go index 2dc1d262b3a..8506fada4f0 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -17,7 +17,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/smartContract/hooks" "github.com/ElrondNetwork/elrond-go/sharding" - "github.com/ElrondNetwork/elrond-vm-common" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" ) type scExecutionState struct { @@ -41,6 +41,7 @@ type scProcessor struct { mapExecState map[uint64]scExecutionState scrForwarder process.IntermediateTransactionHandler + txFeeHandler process.TransactionFeeHandler } var log = logger.DefaultLogger() @@ -56,6 +57,7 @@ func 
NewSmartContractProcessor( adrConv state.AddressConverter, coordinator sharding.Coordinator, scrForwarder process.IntermediateTransactionHandler, + txFeeHandler process.TransactionFeeHandler, ) (*scProcessor, error) { if vmContainer == nil || vmContainer.IsInterfaceNil() { return nil, process.ErrNoVM @@ -84,6 +86,9 @@ func NewSmartContractProcessor( if scrForwarder == nil || scrForwarder.IsInterfaceNil() { return nil, process.ErrNilIntermediateTransactionHandler } + if txFeeHandler == nil { + return nil, process.ErrNilUnsignedTxHandler + } return &scProcessor{ vmContainer: vmContainer, @@ -95,6 +100,7 @@ func NewSmartContractProcessor( adrConv: adrConv, shardCoordinator: coordinator, scrForwarder: scrForwarder, + txFeeHandler: txFeeHandler, mapExecState: make(map[uint64]scExecutionState)}, nil } @@ -186,7 +192,7 @@ func (sc *scProcessor) ExecuteSmartContractTransaction( } // VM is formally verified and the output is correct - crossTxs, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) + crossTxs, consumedFee, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) if err != nil { return err } @@ -196,6 +202,8 @@ func (sc *scProcessor) ExecuteSmartContractTransaction( return err } + sc.txFeeHandler.ProcessTransactionFee(consumedFee) + return nil } @@ -281,7 +289,7 @@ func (sc *scProcessor) DeploySmartContract( return err } - crossTxs, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) + crossTxs, consumedFee, err := sc.processVMOutput(vmOutput, tx, acntSnd, round) if err != nil { return err } @@ -291,6 +299,8 @@ func (sc *scProcessor) DeploySmartContract( return err } + sc.txFeeHandler.ProcessTransactionFee(consumedFee) + return nil } @@ -397,6 +407,10 @@ func (sc *scProcessor) processSCPayment(tx *transaction.Transaction, acntSnd sta return process.ErrWrongTypeAssertion } + if stAcc.Balance.Cmp(cost) < 0 { + return process.ErrInsufficientFunds + } + totalCost := big.NewInt(0) err = stAcc.SetBalanceWithJournal(totalCost.Sub(stAcc.Balance, cost)) if err != nil { @@ -411,23 +425,23 @@ func (sc *scProcessor) processVMOutput( tx *transaction.Transaction, acntSnd state.AccountHandler, round uint64, -) ([]data.TransactionHandler, error) { +) ([]data.TransactionHandler, *big.Int, error) { if vmOutput == nil { - return nil, process.ErrNilVMOutput + return nil, nil, process.ErrNilVMOutput } if tx == nil { - return nil, process.ErrNilTransaction + return nil, nil, process.ErrNilTransaction } txBytes, err := sc.marshalizer.Marshal(tx) if err != nil { - return nil, err + return nil, nil, err } txHash := sc.hasher.Compute(string(txBytes)) err = sc.saveSCOutputToCurrentState(vmOutput, round, txHash) if err != nil { - return nil, err + return nil, nil, err } if vmOutput.ReturnCode != vmcommon.Ok { @@ -439,38 +453,38 @@ func (sc *scProcessor) processVMOutput( stAcc, ok := acntSnd.(*state.Account) if !ok { - return nil, process.ErrWrongTypeAssertion + return nil, nil, process.ErrWrongTypeAssertion } totalCost := big.NewInt(0) err = stAcc.SetBalanceWithJournal(totalCost.Add(stAcc.Balance, tx.Value)) if err != nil { - return nil, err + return nil, nil, err } - return nil, nil + return nil, nil, nil } err = sc.processSCOutputAccounts(vmOutput.OutputAccounts, tx) if err != nil { - return nil, err + return nil, nil, err } scrTxs, err := sc.createSCRTransactions(vmOutput.OutputAccounts, tx, txHash) if err != nil { - return nil, err + return nil, nil, err } acntSnd, err = sc.reloadLocalSndAccount(acntSnd) if err != nil { - return nil, err + return nil, nil, err } totalGasRefund := big.NewInt(0) 
totalGasRefund = totalGasRefund.Add(vmOutput.GasRefund, vmOutput.GasRemaining) - scrRefund, err := sc.refundGasToSender(totalGasRefund, tx, txHash, acntSnd) + scrRefund, consumedFee, err := sc.refundGasToSender(totalGasRefund, tx, txHash, acntSnd) if err != nil { - return nil, err + return nil, nil, err } if scrRefund != nil { @@ -479,15 +493,15 @@ func (sc *scProcessor) processVMOutput( err = sc.deleteAccounts(vmOutput.DeletedAccounts) if err != nil { - return nil, err + return nil, nil, err } err = sc.processTouchedAccounts(vmOutput.TouchedAccounts) if err != nil { - return nil, err + return nil, nil, err } - return scrTxs, nil + return scrTxs, consumedFee, nil } // reloadLocalSndAccount will reload from current account state the sender account @@ -545,13 +559,16 @@ func (sc *scProcessor) refundGasToSender( tx *transaction.Transaction, txHash []byte, acntSnd state.AccountHandler, -) (*smartContractResult.SmartContractResult, error) { +) (*smartContractResult.SmartContractResult, *big.Int, error) { + consumedFee := big.NewInt(0) + consumedFee = consumedFee.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) if gasRefund == nil || gasRefund.Cmp(big.NewInt(0)) <= 0 { - return nil, nil + return nil, consumedFee, nil } refundErd := big.NewInt(0) refundErd = refundErd.Mul(gasRefund, big.NewInt(int64(tx.GasPrice))) + consumedFee = consumedFee.Sub(consumedFee, refundErd) scTx := &smartContractResult.SmartContractResult{} scTx.Value = refundErd @@ -561,21 +578,21 @@ func (sc *scProcessor) refundGasToSender( scTx.TxHash = txHash if acntSnd == nil || acntSnd.IsInterfaceNil() { - return scTx, nil + return scTx, consumedFee, nil } stAcc, ok := acntSnd.(*state.Account) if !ok { - return nil, process.ErrWrongTypeAssertion + return nil, nil, process.ErrWrongTypeAssertion } newBalance := big.NewInt(0).Add(stAcc.Balance, refundErd) err := stAcc.SetBalanceWithJournal(newBalance) if err != nil { - return nil, err + return nil, nil, err } - return scTx, nil + return scTx, consumedFee, nil } // save account changes in state from vmOutput - protected by VM - every output can be treated as is. 
diff --git a/process/smartContract/process_test.go b/process/smartContract/process_test.go index 66c947ca098..652a3bda2a2 100644 --- a/process/smartContract/process_test.go +++ b/process/smartContract/process_test.go @@ -10,6 +10,7 @@ import ( "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/coordinator" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-vm-common" "github.com/pkg/errors" @@ -65,7 +66,9 @@ func TestNewSmartContractProcessorNilVM(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNoVM, err) @@ -83,7 +86,9 @@ func TestNewSmartContractProcessorNilArgsParser(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilArgumentParser, err) @@ -101,7 +106,9 @@ func TestNewSmartContractProcessorNilHasher(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilHasher, err) @@ -119,7 +126,9 @@ func TestNewSmartContractProcessorNilMarshalizer(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -137,7 +146,9 @@ func TestNewSmartContractProcessorNilAccountsDB(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -155,7 +166,9 @@ func TestNewSmartContractProcessorNilAdrConv(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, nil, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilAddressConverter, err) @@ -173,7 +186,9 @@ func TestNewSmartContractProcessorNilShardCoordinator(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, nil, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -191,7 +206,9 @@ func TestNewSmartContractProcessorNilFakeAccountsHandler(t *testing.T) { nil, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, 
process.ErrNilTemporaryAccountsHandler, err) @@ -209,7 +226,9 @@ func TestNewSmartContractProcessor_NilIntermediateMock(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - nil) + nil, + &mock.UnsignedTxHandlerMock{}, + ) assert.Nil(t, sc) assert.Equal(t, process.ErrNilIntermediateTransactionHandler, err) @@ -227,7 +246,9 @@ func TestNewSmartContractProcessor(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -245,7 +266,9 @@ func TestScProcessor_ComputeTransactionTypeNil(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -266,7 +289,9 @@ func TestScProcessor_ComputeTransactionTypeNilTx(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -294,7 +319,9 @@ func TestScProcessor_ComputeTransactionTypeErrWrongTransaction(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -313,18 +340,18 @@ func TestScProcessor_ComputeTransactionTypeScDeployment(t *testing.T) { t.Parallel() addressConverter := &mock.AddressConverterMock{} - sc, err := NewSmartContractProcessor( - &mock.VMContainerMock{}, - &mock.ArgumentParserMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{}, - &mock.TemporaryAccountsHandlerMock{}, + + txTypeHandler, err := coordinator.NewTxTypeHandler( addressConverter, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { + return nil, nil + }, + }, + ) - assert.NotNil(t, sc) + assert.NotNil(t, txTypeHandler) assert.Nil(t, err) tx := &transaction.Transaction{} @@ -334,7 +361,7 @@ func TestScProcessor_ComputeTransactionTypeScDeployment(t *testing.T) { tx.Data = "data" tx.Value = big.NewInt(45) - txType, err := sc.ComputeTransactionType(tx) + txType, err := txTypeHandler.ComputeTransactionType(tx) assert.Nil(t, err) assert.Equal(t, process.SCDeployment, txType) } @@ -353,23 +380,20 @@ func TestScProcessor_ComputeTransactionTypeScInvoking(t *testing.T) { _, acntDst := createAccounts(tx) acntDst.SetCode([]byte("code")) - sc, err := NewSmartContractProcessor( - &mock.VMContainerMock{}, - &mock.ArgumentParserMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { - return acntDst, nil - }}, - &mock.TemporaryAccountsHandlerMock{}, + txTypeHandler, err := coordinator.NewTxTypeHandler( addrConverter, mock.NewMultiShardsCoordinatorMock(5), - 
&mock.IntermediateTransactionHandlerMock{}) + &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { + return acntDst, nil + }, + }, + ) - assert.NotNil(t, sc) + assert.NotNil(t, txTypeHandler) assert.Nil(t, err) - txType, err := sc.ComputeTransactionType(tx) + txType, err := txTypeHandler.ComputeTransactionType(tx) assert.Nil(t, err) assert.Equal(t, process.SCInvoking, txType) } @@ -387,23 +411,20 @@ func TestScProcessor_ComputeTransactionTypeMoveBalance(t *testing.T) { _, acntDst := createAccounts(tx) - sc, err := NewSmartContractProcessor( - &mock.VMContainerMock{}, - &mock.ArgumentParserMock{}, - &mock.HasherMock{}, - &mock.MarshalizerMock{}, - &mock.AccountsStub{GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { - return acntDst, nil - }}, - &mock.TemporaryAccountsHandlerMock{}, + txTypeHandler, err := coordinator.NewTxTypeHandler( addrConverter, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.AccountsStub{ + GetAccountWithJournalCalled: func(addressContainer state.AddressContainer) (handler state.AccountHandler, e error) { + return acntDst, nil + }, + }, + ) - assert.NotNil(t, sc) + assert.NotNil(t, txTypeHandler) assert.Nil(t, err) - txType, err := sc.ComputeTransactionType(tx) + txType, err := txTypeHandler.ComputeTransactionType(tx) assert.Nil(t, err) assert.Equal(t, process.MoveBalance, txType) } @@ -423,7 +444,9 @@ func TestScProcessor_DeploySmartContractBadParse(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConverter, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -458,7 +481,9 @@ func TestScProcessor_DeploySmartContractRunError(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConverter, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -505,7 +530,9 @@ func TestScProcessor_DeploySmartContractWrongTx(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -537,7 +564,9 @@ func TestScProcessor_DeploySmartContract(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConverter, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -578,7 +607,9 @@ func TestScProcessor_ExecuteSmartContractTransactionNilTx(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -608,7 +639,9 @@ func TestScProcessor_ExecuteSmartContractTransactionNilAccount(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + 
&mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -646,7 +679,9 @@ func TestScProcessor_ExecuteSmartContractTransactionBadParser(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -681,7 +716,9 @@ func TestScProcessor_ExecuteSmartContractTransactionVMRunError(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -722,7 +759,9 @@ func TestScProcessor_ExecuteSmartContractTransaction(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -757,7 +796,9 @@ func TestScProcessor_CreateVMCallInputWrongCode(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -791,7 +832,9 @@ func TestScProcessor_CreateVMCallInput(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -821,7 +864,9 @@ func TestScProcessor_CreateVMDeployInputBadFunction(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -863,7 +908,9 @@ func TestScProcessor_CreateVMDeployInput(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -901,7 +948,9 @@ func TestScProcessor_CreateVMDeployInputNotEnoughArguments(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -932,7 +981,9 @@ func TestScProcessor_CreateVMInputWrongArgument(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -966,7 +1017,9 @@ func TestScProcessor_CreateVMInput(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, 
mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1009,13 +1062,15 @@ func TestScProcessor_processVMOutputNilVMOutput(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) acntSrc, _, tx := createAccountsAndTransaction() - _, err = sc.processVMOutput(nil, tx, acntSrc, 10) + _, _, err = sc.processVMOutput(nil, tx, acntSrc, 10) assert.Equal(t, process.ErrNilVMOutput, err) } @@ -1033,14 +1088,16 @@ func TestScProcessor_processVMOutputNilTx(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) acntSrc, _, _ := createAccountsAndTransaction() vmOutput := &vmcommon.VMOutput{} - _, err = sc.processVMOutput(vmOutput, nil, acntSrc, 10) + _, _, err = sc.processVMOutput(vmOutput, nil, acntSrc, 10) assert.Equal(t, process.ErrNilTransaction, err) } @@ -1058,7 +1115,9 @@ func TestScProcessor_processVMOutputNilSndAcc(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1068,7 +1127,7 @@ func TestScProcessor_processVMOutputNilSndAcc(t *testing.T) { GasRefund: big.NewInt(0), GasRemaining: big.NewInt(0), } - _, err = sc.processVMOutput(vmOutput, tx, nil, 10) + _, _, err = sc.processVMOutput(vmOutput, tx, nil, 10) assert.Nil(t, err) } @@ -1087,7 +1146,9 @@ func TestScProcessor_processVMOutputNilDstAcc(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1103,7 +1164,7 @@ func TestScProcessor_processVMOutputNilDstAcc(t *testing.T) { } tx.Value = big.NewInt(0) - _, err = sc.processVMOutput(vmOutput, tx, acntSnd, 10) + _, _, err = sc.processVMOutput(vmOutput, tx, acntSnd, 10) assert.Nil(t, err) } @@ -1132,7 +1193,9 @@ func TestScProcessor_GetAccountFromAddressAccNotFound(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1170,7 +1233,9 @@ func TestScProcessor_GetAccountFromAddrFaildAddressConv(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1208,7 +1273,9 @@ func TestScProcessor_GetAccountFromAddrFailedGetExistingAccount(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + 
&mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1246,7 +1313,9 @@ func TestScProcessor_GetAccountFromAddrAccNotInShard(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1285,7 +1354,9 @@ func TestScProcessor_GetAccountFromAddr(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1325,7 +1396,9 @@ func TestScProcessor_DeleteAccountsFailedAtRemove(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1365,7 +1438,9 @@ func TestScProcessor_DeleteAccountsNotInShard(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1410,7 +1485,9 @@ func TestScProcessor_DeleteAccountsInShard(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, addrConv, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1434,7 +1511,9 @@ func TestScProcessor_ProcessSCPaymentAccNotInShardShouldNotReturnError(t *testin &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1464,7 +1543,9 @@ func TestScProcessor_ProcessSCPaymentWrongTypeAssertion(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1486,6 +1567,45 @@ func TestScProcessor_ProcessSCPaymentWrongTypeAssertion(t *testing.T) { assert.Equal(t, process.ErrWrongTypeAssertion, err) } +func TestScProcessor_ProcessSCPaymentNotEnoughBalance(t *testing.T) { + t.Parallel() + + sc, err := NewSmartContractProcessor( + &mock.VMContainerMock{}, + &mock.ArgumentParserMock{}, + &mock.HasherMock{}, + &mock.MarshalizerMock{}, + &mock.AccountsStub{}, + &mock.TemporaryAccountsHandlerMock{}, + &mock.AddressConverterMock{}, + mock.NewMultiShardsCoordinatorMock(5), + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) + + assert.NotNil(t, sc) + assert.Nil(t, err) + + tx := &transaction.Transaction{} + tx.Nonce = 1 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = []byte("DST") + + tx.Value = big.NewInt(45) + tx.GasPrice = 10 + tx.GasLimit = 15 + + acntSrc, _ := createAccounts(tx) + stAcc, _ := acntSrc.(*state.Account) + stAcc.Balance = big.NewInt(45) + + currBalance := acntSrc.(*state.Account).Balance.Uint64() + + err = sc.ProcessSCPayment(tx, acntSrc) + assert.Equal(t, process.ErrInsufficientFunds, err) + assert.Equal(t, currBalance, 
acntSrc.(*state.Account).Balance.Uint64()) +} + func TestScProcessor_ProcessSCPayment(t *testing.T) { t.Parallel() @@ -1498,7 +1618,9 @@ func TestScProcessor_ProcessSCPayment(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1533,7 +1655,9 @@ func TestScProcessor_RefundGasToSenderNilAndZeroRefund(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1552,11 +1676,11 @@ func TestScProcessor_RefundGasToSenderNilAndZeroRefund(t *testing.T) { acntSrc, _ := createAccounts(tx) currBalance := acntSrc.(*state.Account).Balance.Uint64() - _, err = sc.refundGasToSender(nil, tx, txHash, acntSrc) + _, _, err = sc.refundGasToSender(nil, tx, txHash, acntSrc) assert.Nil(t, err) assert.Equal(t, currBalance, acntSrc.(*state.Account).Balance.Uint64()) - _, err = sc.refundGasToSender(big.NewInt(0), tx, txHash, acntSrc) + _, _, err = sc.refundGasToSender(big.NewInt(0), tx, txHash, acntSrc) assert.Nil(t, err) assert.Equal(t, currBalance, acntSrc.(*state.Account).Balance.Uint64()) } @@ -1573,7 +1697,9 @@ func TestScProcessor_RefundGasToSenderAccNotInShard(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1589,17 +1715,19 @@ func TestScProcessor_RefundGasToSenderAccNotInShard(t *testing.T) { txHash := []byte("txHash") acntSrc, _ := createAccounts(tx) - sctx, err := sc.refundGasToSender(big.NewInt(10), tx, txHash, nil) + sctx, consumed, err := sc.refundGasToSender(big.NewInt(10), tx, txHash, nil) assert.Nil(t, err) assert.NotNil(t, sctx) + assert.Equal(t, 0, consumed.Cmp(big.NewInt(0))) acntSrc = nil - sctx, err = sc.refundGasToSender(big.NewInt(10), tx, txHash, acntSrc) + sctx, consumed, err = sc.refundGasToSender(big.NewInt(10), tx, txHash, acntSrc) assert.Nil(t, err) assert.NotNil(t, sctx) + assert.Equal(t, 0, consumed.Cmp(big.NewInt(0))) badAcc := &mock.AccountWrapMock{} - sctx, err = sc.refundGasToSender(big.NewInt(10), tx, txHash, badAcc) + sctx, consumed, err = sc.refundGasToSender(big.NewInt(10), tx, txHash, badAcc) assert.Equal(t, process.ErrWrongTypeAssertion, err) assert.Nil(t, sctx) } @@ -1616,7 +1744,9 @@ func TestScProcessor_RefundGasToSender(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1634,7 +1764,7 @@ func TestScProcessor_RefundGasToSender(t *testing.T) { currBalance := acntSrc.(*state.Account).Balance.Uint64() refundGas := big.NewInt(10) - _, err = sc.refundGasToSender(refundGas, tx, txHash, acntSrc) + _, _, err = sc.refundGasToSender(refundGas, tx, txHash, acntSrc) assert.Nil(t, err) totalRefund := refundGas.Uint64() * tx.GasPrice @@ -1656,11 +1786,13 @@ func TestScProcessor_processVMOutputNilOutput(t *testing.T) { 
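TestScProcessor_ProcessSCPaymentNotEnoughBalance and the refund tests above both revolve around the same fee arithmetic. A hedged, self-contained sketch of that arithmetic (the real processor works on state.Account objects; the helper names and the gas price used in main are illustrative only): the sender must cover value plus gasLimit*gasPrice up front, and a later refund credits refundedGasUnits*gasPrice back.

package main

import (
	"errors"
	"fmt"
	"math/big"
)

var errInsufficientFunds = errors.New("insufficient funds")

// processSCPayment sketches the up-front charge: the sender pays the tx value
// plus the full gas limit at the tx gas price, or the call fails without
// touching the balance, as the insufficient-funds test asserts.
func processSCPayment(balance, value *big.Int, gasLimit, gasPrice uint64) (*big.Int, error) {
	cost := new(big.Int).Mul(new(big.Int).SetUint64(gasLimit), new(big.Int).SetUint64(gasPrice))
	cost.Add(cost, value)

	if balance.Cmp(cost) < 0 {
		return balance, errInsufficientFunds
	}
	return new(big.Int).Sub(balance, cost), nil
}

// refundGasToSender sketches the refund path: unused gas units are converted
// back to currency at the tx gas price and credited to the sender; nil or
// zero refunds leave the balance untouched.
func refundGasToSender(balance, gasRefund *big.Int, gasPrice uint64) *big.Int {
	if gasRefund == nil || gasRefund.Sign() == 0 {
		return balance
	}
	refundValue := new(big.Int).Mul(gasRefund, new(big.Int).SetUint64(gasPrice))
	return new(big.Int).Add(balance, refundValue)
}

func main() {
	// mirrors the insufficient-funds test: balance 45, value 45, gas limit 15 at price 10
	_, err := processSCPayment(big.NewInt(45), big.NewInt(45), 15, 10)
	fmt.Println(err) // insufficient funds

	// 10 refunded gas units at an assumed gas price of 10 credit 100 back
	fmt.Println(refundGasToSender(big.NewInt(0), big.NewInt(10), 10)) // 100
}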
&mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) - _, err = sc.ProcessVMOutput(nil, tx, acntSrc, round) + _, _, err = sc.ProcessVMOutput(nil, tx, acntSrc, round) assert.Equal(t, process.ErrNilVMOutput, err) } @@ -1680,12 +1812,14 @@ func TestScProcessor_processVMOutputNilTransaction(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) vmOutput := &vmcommon.VMOutput{} - _, err = sc.ProcessVMOutput(vmOutput, nil, acntSrc, round) + _, _, err = sc.ProcessVMOutput(vmOutput, nil, acntSrc, round) assert.Equal(t, process.ErrNilTransaction, err) } @@ -1706,7 +1840,9 @@ func TestScProcessor_processVMOutput(t *testing.T) { &mock.TemporaryAccountsHandlerMock{}, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1720,7 +1856,7 @@ func TestScProcessor_processVMOutput(t *testing.T) { } tx.Value = big.NewInt(0) - _, err = sc.ProcessVMOutput(vmOutput, tx, acntSrc, round) + _, _, err = sc.ProcessVMOutput(vmOutput, tx, acntSrc, round) assert.Nil(t, err) } @@ -1740,7 +1876,9 @@ func TestScProcessor_processSCOutputAccounts(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, mock.NewMultiShardsCoordinatorMock(5), - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1819,7 +1957,9 @@ func TestScProcessor_processSCOutputAccountsNotInShard(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1858,7 +1998,9 @@ func TestScProcessor_CreateCrossShardTransactions(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1901,7 +2043,9 @@ func TestScProcessor_ProcessSmartContractResultNilScr(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1927,7 +2071,9 @@ func TestScProcessor_ProcessSmartContractResultErrGetAccount(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1951,7 +2097,9 @@ func TestScProcessor_ProcessSmartContractResultAccNotInShard(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + 
&mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -1980,7 +2128,9 @@ func TestScProcessor_ProcessSmartContractResultBadAccType(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -2012,7 +2162,9 @@ func TestScProcessor_ProcessSmartContractResultOutputBalanceNil(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -2050,7 +2202,9 @@ func TestScProcessor_ProcessSmartContractResultWithCode(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) @@ -2092,7 +2246,9 @@ func TestScProcessor_ProcessSmartContractResultWithData(t *testing.T) { fakeAccountsHandler, &mock.AddressConverterMock{}, shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) + &mock.IntermediateTransactionHandlerMock{}, + &mock.UnsignedTxHandlerMock{}, + ) assert.NotNil(t, sc) assert.Nil(t, err) diff --git a/process/sync/baseForkDetector.go b/process/sync/baseForkDetector.go index aa082ceac5b..5bccaa97a80 100644 --- a/process/sync/baseForkDetector.go +++ b/process/sync/baseForkDetector.go @@ -185,10 +185,8 @@ func (bfd *baseForkDetector) RemoveHeaders(nonce uint64, hash []byte) { var preservedHdrInfos []*headerInfo - bfd.mutHeaders.RLock() + bfd.mutHeaders.Lock() hdrInfos := bfd.headers[nonce] - bfd.mutHeaders.RUnlock() - for _, hdrInfoStored := range hdrInfos { if bytes.Equal(hdrInfoStored.hash, hash) { continue @@ -197,7 +195,6 @@ func (bfd *baseForkDetector) RemoveHeaders(nonce uint64, hash []byte) { preservedHdrInfos = append(preservedHdrInfos, hdrInfoStored) } - bfd.mutHeaders.Lock() if preservedHdrInfos == nil { delete(bfd.headers, nonce) } else { @@ -450,3 +447,23 @@ func (bfd *baseForkDetector) shouldSignalFork( return shouldSignalFork } + +func (bfd *baseForkDetector) shouldAddBlockInForkDetector( + header data.HeaderHandler, + state process.BlockHeaderState, + finality int64, +) error { + + noncesDifference := int64(bfd.ProbableHighestNonce()) - int64(header.GetNonce()) + isSyncing := state == process.BHReceived && noncesDifference > process.MaxNoncesDifference + if state == process.BHProcessed || isSyncing { + return nil + } + + roundTooOld := int64(header.GetRound()) < bfd.rounder.Index()-finality + if roundTooOld { + return ErrLowerRoundInBlock + } + + return nil +} diff --git a/process/sync/baseForkDetector_test.go b/process/sync/baseForkDetector_test.go index 85a189e2c13..6ed06afb577 100644 --- a/process/sync/baseForkDetector_test.go +++ b/process/sync/baseForkDetector_test.go @@ -761,3 +761,112 @@ func TestBasicForkDetector_GetProbableHighestNonce(t *testing.T) { hInfos = bfd.GetHeaders(3) assert.Equal(t, uint64(3), bfd.GetProbableHighestNonce(hInfos)) } + +func TestShardForkDetector_ShouldAddBlockInForkDetectorShouldWork(t *testing.T) { + t.Parallel() + rounderMock := &mock.RounderMock{RoundIndex: 10} + sfd, _ := sync.NewShardForkDetector(rounderMock) + + hdr := &block.Header{Nonce: 1, Round: 1} + err := sfd.ShouldAddBlockInForkDetector(hdr, process.BHProcessed, 
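The RemoveHeaders change in baseForkDetector.go above replaces an RLock-protected read followed by a separate Lock-protected write with a single Lock held across the whole read-modify-write. A small illustrative sketch of that pattern on a generic map (not the fork detector's actual types): holding the write lock for the full operation prevents a concurrent writer from slipping in between the read and the update.

package main

import "sync"

type store struct {
	mut  sync.RWMutex
	data map[uint64][]string
}

// remove deletes one value for a key while preserving the others. The write
// lock covers the whole read-modify-write so no update can be lost to a
// concurrent caller running between the read and the write-back.
func (s *store) remove(key uint64, value string) {
	s.mut.Lock()
	defer s.mut.Unlock()

	var preserved []string
	for _, v := range s.data[key] {
		if v == value {
			continue
		}
		preserved = append(preserved, v)
	}

	if preserved == nil {
		delete(s.data, key)
	} else {
		s.data[key] = preserved
	}
}

func main() {
	s := &store{data: map[uint64][]string{1: {"a", "b"}}}
	s.remove(1, "a")
}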
process.ShardBlockFinality) + assert.Nil(t, err) + + sfd.SetProbableHighestNonce(hdr.GetNonce() + process.MaxNoncesDifference + 1) + err = sfd.ShouldAddBlockInForkDetector(hdr, process.BHReceived, process.ShardBlockFinality) + assert.Nil(t, err) + + sfd.SetProbableHighestNonce(hdr.GetNonce() + process.MaxNoncesDifference) + hdr.Round = uint64(rounderMock.RoundIndex - process.ShardBlockFinality) + err = sfd.ShouldAddBlockInForkDetector(hdr, process.BHReceived, process.ShardBlockFinality) + assert.Nil(t, err) + + sfd.SetProbableHighestNonce(hdr.GetNonce() + process.MaxNoncesDifference + 1) + err = sfd.ShouldAddBlockInForkDetector(hdr, process.BHProposed, process.ShardBlockFinality) + assert.Nil(t, err) +} + +func TestShardForkDetector_ShouldAddBlockInForkDetectorShouldErrLowerRoundInBlock(t *testing.T) { + t.Parallel() + rounderMock := &mock.RounderMock{RoundIndex: 10} + sfd, _ := sync.NewShardForkDetector(rounderMock) + hdr := &block.Header{Nonce: 1, Round: 1} + + hdr.Round = uint64(rounderMock.RoundIndex - process.ShardBlockFinality - 1) + err := sfd.ShouldAddBlockInForkDetector(hdr, process.BHReceived, process.ShardBlockFinality) + assert.Equal(t, sync.ErrLowerRoundInBlock, err) + + sfd.SetProbableHighestNonce(hdr.GetNonce() + process.MaxNoncesDifference + 1) + err = sfd.ShouldAddBlockInForkDetector(hdr, process.BHProposed, process.ShardBlockFinality) + assert.Equal(t, sync.ErrLowerRoundInBlock, err) +} + +func TestMetaForkDetector_ShouldAddBlockInForkDetectorShouldWork(t *testing.T) { + t.Parallel() + rounderMock := &mock.RounderMock{RoundIndex: 10} + mfd, _ := sync.NewMetaForkDetector(rounderMock) + + hdr := &block.MetaBlock{Nonce: 1, Round: 1} + err := mfd.ShouldAddBlockInForkDetector(hdr, process.BHProcessed, process.MetaBlockFinality) + assert.Nil(t, err) + + mfd.SetProbableHighestNonce(hdr.GetNonce() + process.MaxNoncesDifference + 1) + err = mfd.ShouldAddBlockInForkDetector(hdr, process.BHReceived, process.MetaBlockFinality) + assert.Nil(t, err) + + mfd.SetProbableHighestNonce(hdr.GetNonce() + process.MaxNoncesDifference) + hdr.Round = uint64(rounderMock.RoundIndex - process.MetaBlockFinality) + err = mfd.ShouldAddBlockInForkDetector(hdr, process.BHReceived, process.MetaBlockFinality) + assert.Nil(t, err) + + mfd.SetProbableHighestNonce(hdr.GetNonce() + process.MaxNoncesDifference + 1) + err = mfd.ShouldAddBlockInForkDetector(hdr, process.BHProposed, process.MetaBlockFinality) + assert.Nil(t, err) +} + +func TestMetaForkDetector_ShouldAddBlockInForkDetectorShouldErrLowerRoundInBlock(t *testing.T) { + t.Parallel() + rounderMock := &mock.RounderMock{RoundIndex: 10} + mfd, _ := sync.NewMetaForkDetector(rounderMock) + hdr := &block.MetaBlock{Nonce: 1, Round: 1} + + hdr.Round = uint64(rounderMock.RoundIndex - process.MetaBlockFinality - 1) + err := mfd.ShouldAddBlockInForkDetector(hdr, process.BHReceived, process.MetaBlockFinality) + assert.Equal(t, sync.ErrLowerRoundInBlock, err) + + mfd.SetProbableHighestNonce(hdr.GetNonce() + process.MaxNoncesDifference + 1) + err = mfd.ShouldAddBlockInForkDetector(hdr, process.BHProposed, process.MetaBlockFinality) + assert.Equal(t, sync.ErrLowerRoundInBlock, err) +} + +func TestShardForkDetector_AddFinalHeadersShouldNotChangeTheFinalCheckpoint(t *testing.T) { + t.Parallel() + rounderMock := &mock.RounderMock{RoundIndex: 10} + sfd, _ := sync.NewShardForkDetector(rounderMock) + hdr1 := &block.Header{Nonce: 3, Round: 3} + hash1 := []byte("hash1") + hdr2 := &block.Header{Nonce: 1, Round: 1} + hash2 := []byte("hash2") + hdr3 := &block.Header{Nonce: 4, 
Round: 5} + hash3 := []byte("hash3") + + hdrs := make([]data.HeaderHandler, 0) + hashes := make([][]byte, 0) + hdrs = append(hdrs, hdr1) + hashes = append(hashes, hash1) + sfd.AddFinalHeaders(hdrs, hashes) + assert.Equal(t, hdr1.Nonce, sfd.FinalCheckpointNonce()) + + hdrs = make([]data.HeaderHandler, 0) + hashes = make([][]byte, 0) + hdrs = append(hdrs, hdr2) + hashes = append(hashes, hash2) + sfd.AddFinalHeaders(hdrs, hashes) + assert.Equal(t, hdr1.Nonce, sfd.FinalCheckpointNonce()) + + hdrs = make([]data.HeaderHandler, 0) + hashes = make([][]byte, 0) + hdrs = append(hdrs, hdr3) + hashes = append(hashes, hash3) + sfd.AddFinalHeaders(hdrs, hashes) + assert.Equal(t, hdr3.Nonce, sfd.FinalCheckpointNonce()) +} diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index dc801fb3c3a..be99bd5f3ae 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -29,6 +29,9 @@ const sleepTime = 5 * time.Millisecond // block through recovery mechanism, if its block request is not resolved and no new block header is received meantime const maxRoundsToWait = 5 +// maxHeadersToRequestInAdvance defines the maximum number of headers which will be requested in advance if they are missing +const maxHeadersToRequestInAdvance = 10 + type notarizedInfo struct { lastNotarized map[uint32]uint64 finalNotarized map[uint32]uint64 @@ -74,6 +77,7 @@ type baseBootstrap struct { chStopSync chan bool waitTime time.Duration + mutNodeSynched sync.RWMutex isNodeSynchronized bool hasLastBlock bool roundIndex int64 @@ -330,7 +334,7 @@ func (boot *baseBootstrap) requestedHeaderHash() []byte { } func (boot *baseBootstrap) processReceivedHeader(headerHandler data.HeaderHandler, headerHash []byte) { - log.Debug(fmt.Sprintf("receivedHeaders: received header with hash %s and nonce %d from network\n", + log.Debug(fmt.Sprintf("received header with hash %s and nonce %d from network\n", core.ToB64(headerHash), headerHandler.GetNonce())) @@ -356,7 +360,7 @@ func (boot *baseBootstrap) processReceivedHeader(headerHandler data.HeaderHandle // receivedHeaderNonce method is a call back function which is called when a new header is added // in the block headers pool func (boot *baseBootstrap) receivedHeaderNonce(nonce uint64, shardId uint32, hash []byte) { - log.Debug(fmt.Sprintf("receivedHeaderNonce: received header with nonce %d and hash %s from network\n", + log.Debug(fmt.Sprintf("received header with nonce %d and hash %s from network\n", nonce, core.ToB64(hash))) @@ -366,9 +370,9 @@ func (boot *baseBootstrap) receivedHeaderNonce(nonce uint64, shardId uint32, has } if *n == nonce { - log.Info(fmt.Sprintf("received requested header with nonce %d from network and probable highest nonce is %d\n", + log.Info(fmt.Sprintf("received requested header with nonce %d and hash %s from network\n", nonce, - boot.forkDetector.ProbableHighestNonce())) + core.ToB64(hash))) boot.setRequestedHeaderNonce(nil) boot.chRcvHdrNonce <- true } @@ -432,6 +436,9 @@ func (boot *baseBootstrap) waitForHeaderHash() error { // is not synchronized yet and it has to continue the bootstrapping mechanism, otherwise the node is already // synched and it can participate to the consensus, if it is in the jobDone group of this rounder func (boot *baseBootstrap) ShouldSync() bool { + boot.mutNodeSynched.Lock() + defer boot.mutNodeSynched.Unlock() + isNodeSynchronizedInCurrentRound := boot.roundIndex == boot.rounder.Index() && boot.isNodeSynchronized if isNodeSynchronizedInCurrentRound { return false @@ -558,3 +565,32 @@ func isRandomSeedValid(header 
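TestShardForkDetector_AddFinalHeadersShouldNotChangeTheFinalCheckpoint pins down the rule that the final checkpoint may only move forward. A minimal sketch of that rule with plain types standing in for the detector's internals: a batch of final headers moves the checkpoint to the first header whose nonce is not lower than the current one, so the checkpoint never goes backwards.

package main

import "fmt"

type header struct {
	nonce uint64
}

// updateFinalCheckpoint sketches the rule the test verifies: only a final
// header with a nonce at least equal to the current checkpoint may replace it.
func updateFinalCheckpoint(current uint64, finalHeaders []header) uint64 {
	for _, h := range finalHeaders {
		if h.nonce >= current {
			return h.nonce
		}
	}
	return current
}

func main() {
	cp := uint64(0)
	cp = updateFinalCheckpoint(cp, []header{{nonce: 3}}) // checkpoint becomes 3
	cp = updateFinalCheckpoint(cp, []header{{nonce: 1}}) // stays 3
	cp = updateFinalCheckpoint(cp, []header{{nonce: 4}}) // becomes 4
	fmt.Println(cp)
}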
data.HeaderHandler) bool { return !isPrevRandSeedNilOrEmpty && !isRandSeedNilOrEmpty } + +func (boot *baseBootstrap) requestHeadersFromNonceIfMissing( + nonce uint64, + haveHeaderInPoolWithNonce func(uint64) bool, + hdrRes dataRetriever.HeaderResolver) { + + nbRequestedHdrs := 0 + maxNonce := core.MinUint64(nonce+maxHeadersToRequestInAdvance-1, boot.forkDetector.ProbableHighestNonce()) + for currentNonce := nonce; currentNonce <= maxNonce; currentNonce++ { + haveHeader := haveHeaderInPoolWithNonce(nonce) + if !haveHeader { + err := hdrRes.RequestDataFromNonce(currentNonce) + if err != nil { + log.Error(err.Error()) + continue + } + + nbRequestedHdrs++ + } + } + + if nbRequestedHdrs > 0 { + log.Info(fmt.Sprintf("requested in advance %d headers from nonce %d to nonce %d and probable highest nonce is %d\n", + nbRequestedHdrs, + nonce, + maxNonce, + boot.forkDetector.ProbableHighestNonce())) + } +} diff --git a/process/sync/export_test.go b/process/sync/export_test.go index 0273107dd62..f568daee595 100644 --- a/process/sync/export_test.go +++ b/process/sync/export_test.go @@ -11,7 +11,7 @@ func (boot *ShardBootstrap) RequestHeaderWithNonce(nonce uint64) { boot.requestHeaderWithNonce(nonce) } -func (boot *ShardBootstrap) GetMiniBlocks(hashes [][]byte) interface{} { +func (boot *ShardBootstrap) GetMiniBlocks(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { return boot.miniBlockResolver.GetMiniBlocks(hashes) } @@ -272,3 +272,15 @@ func (sbm *StorageBootstrapperMock) IsInterfaceNil() bool { } return false } + +func (bfd *baseForkDetector) ShouldAddBlockInForkDetector(header data.HeaderHandler, state process.BlockHeaderState, finality int64) error { + return bfd.shouldAddBlockInForkDetector(header, state, finality) +} + +func (bfd *baseForkDetector) SetProbableHighestNonce(nonce uint64) { + bfd.setProbableHighestNonce(nonce) +} + +func (sfd *shardForkDetector) AddFinalHeaders(finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte) { + sfd.addFinalHeaders(finalHeaders, finalHeadersHashes) +} diff --git a/process/sync/metaForkDetector.go b/process/sync/metaForkDetector.go index b2b63348a58..53c1cafea6a 100644 --- a/process/sync/metaForkDetector.go +++ b/process/sync/metaForkDetector.go @@ -54,7 +54,7 @@ func (mfd *metaForkDetector) AddHeader( return err } - err = mfd.checkMetaBlockValidity(header) + err = mfd.shouldAddBlockInForkDetector(header, state, process.MetaBlockFinality) if err != nil { return err } @@ -78,12 +78,3 @@ func (mfd *metaForkDetector) AddHeader( return nil } - -func (mfd *metaForkDetector) checkMetaBlockValidity(header data.HeaderHandler) error { - roundTooOld := int64(header.GetRound()) < mfd.rounder.Index()-process.MetaBlockFinality - if roundTooOld { - return ErrLowerRoundInBlock - } - - return nil -} diff --git a/process/sync/metablock.go b/process/sync/metablock.go index 29660b934e5..3c1c853a100 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -451,6 +451,8 @@ func (boot *MetaBootstrap) SyncBlock() error { return err } + go boot.requestHeadersFromNonceIfMissing(hdr.GetNonce()+1, boot.haveMetaHeaderInPoolWithNonce, boot.hdrRes) + haveTime := func() time.Duration { return boot.rounder.TimeDuration() } @@ -483,7 +485,9 @@ func (boot *MetaBootstrap) requestHeaderWithNonce(nonce uint64) { boot.setRequestedHeaderNonce(&nonce) err := boot.hdrRes.RequestDataFromNonce(nonce) - log.Info(fmt.Sprintf("requested header with nonce %d from network\n", nonce)) + log.Info(fmt.Sprintf("requested header with nonce %d from network and probable highest nonce is 
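requestHeadersFromNonceIfMissing, shown above, asks the resolver in advance for a bounded window of headers. A simplified, self-contained sketch of that window logic, with the pool check and the resolver reduced to plain callbacks (the callback names are placeholders, not the real interfaces): the window runs from the next needed nonce up to at most maxHeadersToRequestInAdvance headers, capped by the probable highest nonce, and only nonces missing from the pool are requested.

package main

import "fmt"

const maxHeadersToRequestInAdvance = 10

func minUint64(a, b uint64) uint64 {
	if a < b {
		return a
	}
	return b
}

// requestHeadersInAdvance requests every nonce in the window
// [fromNonce, min(fromNonce+maxHeadersToRequestInAdvance-1, probableHighestNonce)]
// that is missing from the pool, and returns how many requests it issued.
func requestHeadersInAdvance(
	fromNonce uint64,
	probableHighestNonce uint64,
	haveHeaderInPool func(uint64) bool,
	requestHeader func(uint64) error,
) int {
	requested := 0
	maxNonce := minUint64(fromNonce+maxHeadersToRequestInAdvance-1, probableHighestNonce)
	for nonce := fromNonce; nonce <= maxNonce; nonce++ {
		if haveHeaderInPool(nonce) {
			continue
		}
		if err := requestHeader(nonce); err != nil {
			continue
		}
		requested++
	}
	return requested
}

func main() {
	have := map[uint64]bool{12: true}
	n := requestHeadersInAdvance(11, 30,
		func(nonce uint64) bool { return have[nonce] },
		func(nonce uint64) error { fmt.Println("requesting header nonce", nonce); return nil },
	)
	fmt.Println("requested", n, "headers in advance") // 9: nonces 11..20 except 12
}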
%d\n", + nonce, + boot.forkDetector.ProbableHighestNonce())) if err != nil { log.Error(err.Error()) @@ -674,3 +678,12 @@ func (boot *MetaBootstrap) IsInterfaceNil() bool { } return false } + +func (boot *MetaBootstrap) haveMetaHeaderInPoolWithNonce(nonce uint64) bool { + _, _, err := process.GetMetaHeaderFromPoolWithNonce( + nonce, + boot.headers, + boot.headersNonces) + + return err == nil +} diff --git a/process/sync/shardForkDetector.go b/process/sync/shardForkDetector.go index e2cd33c2930..82cc215560d 100644 --- a/process/sync/shardForkDetector.go +++ b/process/sync/shardForkDetector.go @@ -54,6 +54,11 @@ func (sfd *shardForkDetector) AddHeader( return err } + err = sfd.shouldAddBlockInForkDetector(header, state, process.ShardBlockFinality) + if err != nil { + return err + } + if state == process.BHProcessed { sfd.addFinalHeaders(finalHeaders, finalHeadersHashes) sfd.addCheckpoint(&checkpointInfo{nonce: header.GetNonce(), round: header.GetRound()}) @@ -77,8 +82,8 @@ func (sfd *shardForkDetector) AddHeader( func (sfd *shardForkDetector) addFinalHeaders(finalHeaders []data.HeaderHandler, finalHeadersHashes [][]byte) { finalCheckpointWasSet := false for i := 0; i < len(finalHeaders); i++ { - isFinalHeaderNonceHigherThanCurrent := finalHeaders[i].GetNonce() > sfd.GetHighestFinalBlockNonce() - if isFinalHeaderNonceHigherThanCurrent { + isFinalHeaderNonceNotLowerThanCurrent := finalHeaders[i].GetNonce() >= sfd.finalCheckpoint().nonce + if isFinalHeaderNonceNotLowerThanCurrent { if !finalCheckpointWasSet { sfd.setFinalCheckpoint(&checkpointInfo{nonce: finalHeaders[i].GetNonce(), round: finalHeaders[i].GetRound()}) finalCheckpointWasSet = true diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index d381596d6b8..bef2da6fc63 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -252,14 +252,17 @@ func (boot *ShardBootstrap) getBlockBody(headerHandler data.HeaderHandler) (data return nil, process.ErrWrongTypeAssertion } - miniBlockHashes := make([][]byte, 0) + hashes := make([][]byte, len(header.MiniBlockHeaders)) for i := 0; i < len(header.MiniBlockHeaders); i++ { - miniBlockHashes = append(miniBlockHashes, header.MiniBlockHeaders[i].Hash) + hashes[i] = header.MiniBlockHeaders[i].Hash } - miniBlockSlice := boot.miniBlockResolver.GetMiniBlocks(miniBlockHashes) + miniBlocks, missingMiniBlocksHashes := boot.miniBlockResolver.GetMiniBlocks(hashes) + if len(missingMiniBlocksHashes) > 0 { + return nil, process.ErrMissingBody + } - return block.Body(miniBlockSlice), nil + return block.Body(miniBlocks), nil } func (boot *ShardBootstrap) removeBlockBody( @@ -672,12 +675,14 @@ func (boot *ShardBootstrap) SyncBlock() error { return err } - miniBlockHashes := make([][]byte, 0) + go boot.requestHeadersFromNonceIfMissing(hdr.GetNonce()+1, boot.haveShardHeaderInPoolWithNonce, boot.hdrRes) + + hashes := make([][]byte, len(hdr.MiniBlockHeaders)) for i := 0; i < len(hdr.MiniBlockHeaders); i++ { - miniBlockHashes = append(miniBlockHashes, hdr.MiniBlockHeaders[i].Hash) + hashes[i] = hdr.MiniBlockHeaders[i].Hash } - blk, err := boot.getMiniBlocksRequestingIfMissing(miniBlockHashes) + miniBlockSlice, err := boot.getMiniBlocksRequestingIfMissing(hashes) if err != nil { return err } @@ -686,12 +691,6 @@ func (boot *ShardBootstrap) SyncBlock() error { return boot.rounder.TimeDuration() } - miniBlockSlice, ok := blk.(block.MiniBlockSlice) - if !ok { - err = process.ErrWrongTypeAssertion - return err - } - blockBody := block.Body(miniBlockSlice) timeBefore := time.Now() err = 
boot.blkExecutor.ProcessBlock(boot.blkc, hdr, blockBody, haveTime) @@ -720,7 +719,9 @@ func (boot *ShardBootstrap) requestHeaderWithNonce(nonce uint64) { boot.setRequestedHeaderNonce(&nonce) err := boot.hdrRes.RequestDataFromNonce(nonce) - log.Info(fmt.Sprintf("requested header with nonce %d from network\n", nonce)) + log.Info(fmt.Sprintf("requested header with nonce %d from network and probable highest nonce is %d\n", + nonce, + boot.forkDetector.ProbableHighestNonce())) if err != nil { log.Error(err.Error()) @@ -800,7 +801,7 @@ func (boot *ShardBootstrap) requestMiniBlocks(hashes [][]byte) { boot.setRequestedMiniBlocks(hashes) err = boot.miniBlockResolver.RequestDataFromHashArray(hashes) - log.Info(fmt.Sprintf("requested %v miniblocks from network\n", len(hashes))) + log.Info(fmt.Sprintf("requested %d miniblocks from network\n", len(hashes))) if err != nil { log.Error(err.Error()) @@ -812,20 +813,22 @@ func (boot *ShardBootstrap) requestMiniBlocks(hashes [][]byte) { // the func returns interface{} as to match the next implementations for block body fetchers // that will be added. The block executor should decide by parsing the header block body type value // what kind of block body received. -func (boot *ShardBootstrap) getMiniBlocksRequestingIfMissing(hashes [][]byte) (interface{}, error) { - miniBlocks := boot.miniBlockResolver.GetMiniBlocks(hashes) - if miniBlocks == nil { +func (boot *ShardBootstrap) getMiniBlocksRequestingIfMissing(hashes [][]byte) (block.MiniBlockSlice, error) { + miniBlocks, missingMiniBlocksHashes := boot.miniBlockResolver.GetMiniBlocksFromPool(hashes) + if len(missingMiniBlocksHashes) > 0 { _ = process.EmptyChannel(boot.chRcvMiniBlocks) - boot.requestMiniBlocks(hashes) + boot.requestMiniBlocks(missingMiniBlocksHashes) err := boot.waitForMiniBlocks() if err != nil { return nil, err } - miniBlocks = boot.miniBlockResolver.GetMiniBlocks(hashes) - if miniBlocks == nil { + receivedMiniBlocks, unreceivedMiniBlocksHashes := boot.miniBlockResolver.GetMiniBlocksFromPool(missingMiniBlocksHashes) + if len(unreceivedMiniBlocksHashes) > 0 { return nil, process.ErrMissingBody } + + miniBlocks = append(miniBlocks, receivedMiniBlocks...) 
} return miniBlocks, nil @@ -962,14 +965,17 @@ func (boot *ShardBootstrap) getPrevHeader(headerStore storage.Storer, header *bl } func (boot *ShardBootstrap) getTxBlockBody(header *block.Header) (block.Body, error) { - mbLength := len(header.MiniBlockHeaders) - hashes := make([][]byte, mbLength) - for i := 0; i < mbLength; i++ { + hashes := make([][]byte, len(header.MiniBlockHeaders)) + for i := 0; i < len(header.MiniBlockHeaders); i++ { hashes[i] = header.MiniBlockHeaders[i].Hash } - bodyMiniBlocks := boot.miniBlockResolver.GetMiniBlocks(hashes) - return block.Body(bodyMiniBlocks), nil + miniBlocks, missingMiniBlocksHashes := boot.miniBlockResolver.GetMiniBlocks(hashes) + if len(missingMiniBlocksHashes) > 0 { + return nil, process.ErrMissingBody + } + + return block.Body(miniBlocks), nil } func (boot *ShardBootstrap) getCurrentHeader() (*block.Header, error) { @@ -993,3 +999,13 @@ func (boot *ShardBootstrap) IsInterfaceNil() bool { } return false } + +func (boot *ShardBootstrap) haveShardHeaderInPoolWithNonce(nonce uint64) bool { + _, _, err := process.GetShardHeaderFromPoolWithNonce( + nonce, + boot.shardCoordinator.SelfId(), + boot.headers, + boot.headersNonces) + + return err == nil +} diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index 8e37c19f78a..64f0d3a658b 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -55,8 +55,11 @@ func createMockResolversFinder() *mock.ResolversFinderStub { if strings.Contains(baseTopic, factory.MiniBlocksTopic) { return &mock.MiniBlocksResolverMock{ - GetMiniBlocksCalled: func(hashes [][]byte) block.MiniBlockSlice { - return make(block.MiniBlockSlice, 0) + GetMiniBlocksCalled: func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + return make(block.MiniBlockSlice, 0), make([][]byte, 0) + }, + GetMiniBlocksFromPoolCalled: func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + return make(block.MiniBlockSlice, 0), make([][]byte, 0) }, }, nil } @@ -88,8 +91,11 @@ func createMockResolversFinderNilMiniBlocks() *mock.ResolversFinderStub { RequestDataFromHashArrayCalled: func(hash [][]byte) error { return nil }, - GetMiniBlocksCalled: func(hashes [][]byte) block.MiniBlockSlice { - return nil + GetMiniBlocksCalled: func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + return make(block.MiniBlockSlice, 0), [][]byte{[]byte("hash")} + }, + GetMiniBlocksFromPoolCalled: func(hashes [][]byte) (block.MiniBlockSlice, [][]byte) { + return make(block.MiniBlockSlice, 0), [][]byte{[]byte("hash")} }, }, nil } @@ -1963,7 +1969,7 @@ func TestShardGetBlockFromPoolShouldReturnBlock(t *testing.T) { mbHashes := make([][]byte, 0) mbHashes = append(mbHashes, []byte("aaaa")) - mb := bs.GetMiniBlocks(mbHashes) + mb, _ := bs.GetMiniBlocks(mbHashes) assert.True(t, reflect.DeepEqual(blk, mb)) } @@ -2625,12 +2631,12 @@ func TestBootstrap_GetTxBodyHavingHashReturnsFromCacherShouldWork(t *testing.T) account, math.MaxUint32, ) - txBlockRecovered := bs.GetMiniBlocks(requestedHash) + txBlockRecovered, _ := bs.GetMiniBlocks(requestedHash) assert.True(t, reflect.DeepEqual(txBlockRecovered, txBlock)) } -func TestBootstrap_GetTxBodyHavingHashNotFoundInCacherOrStorageShouldRetNil(t *testing.T) { +func TestBootstrap_GetTxBodyHavingHashNotFoundInCacherOrStorageShouldRetEmptySlice(t *testing.T) { t.Parallel() mbh := []byte("requested hash") @@ -2679,9 +2685,9 @@ func TestBootstrap_GetTxBodyHavingHashNotFoundInCacherOrStorageShouldRetNil(t *t account, math.MaxUint32, ) - txBlockRecovered := bs.GetMiniBlocks(requestedHash) + 
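getMiniBlocksRequestingIfMissing now works in two passes: read what is already in the pool, request only the missing hashes, wait, then read the pool again and give up with ErrMissingBody if anything is still absent. A generic sketch of that pattern, with the pool lookup and the request-and-wait step reduced to callbacks (these are stand-ins, not the real MiniBlocksResolver API).

package main

import (
	"errors"
	"fmt"
)

var errMissingBody = errors.New("missing body")

// getRequestingIfMissing keeps what the pool already has, requests only the
// missing hashes, then retries the pool and fails if anything is still absent.
func getRequestingIfMissing(
	hashes []string,
	fromPool func(hashes []string) (found []string, missing []string),
	requestAndWait func(missing []string) error,
) ([]string, error) {
	items, missing := fromPool(hashes)
	if len(missing) == 0 {
		return items, nil
	}

	if err := requestAndWait(missing); err != nil {
		return nil, err
	}

	received, stillMissing := fromPool(missing)
	if len(stillMissing) > 0 {
		return nil, errMissingBody
	}

	return append(items, received...), nil
}

func main() {
	pool := map[string]bool{"mb1": true}
	fromPool := func(hashes []string) (found, missing []string) {
		for _, h := range hashes {
			if pool[h] {
				found = append(found, h)
			} else {
				missing = append(missing, h)
			}
		}
		return
	}
	requestAndWait := func(missing []string) error {
		for _, h := range missing {
			pool[h] = true // pretend the network delivered it while we waited
		}
		return nil
	}

	mbs, err := getRequestingIfMissing([]string{"mb1", "mb2"}, fromPool, requestAndWait)
	fmt.Println(mbs, err)
}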
txBlockRecovered, _ := bs.GetMiniBlocks(requestedHash) - assert.Nil(t, txBlockRecovered) + assert.Equal(t, 0, len(txBlockRecovered)) } func TestBootstrap_GetTxBodyHavingHashFoundInStorageShouldWork(t *testing.T) { @@ -2739,7 +2745,7 @@ func TestBootstrap_GetTxBodyHavingHashFoundInStorageShouldWork(t *testing.T) { account, math.MaxUint32, ) - txBlockRecovered := bs.GetMiniBlocks(requestedHash) + txBlockRecovered, _ := bs.GetMiniBlocks(requestedHash) assert.Equal(t, txBlock, txBlockRecovered) } @@ -4898,7 +4904,7 @@ func NewStorageBootstrapperMock() *sync.StorageBootstrapperMock { fmt.Printf("last notarized items: %d\n", len(lastNotarized)) }, AddHeaderToForkDetectorCalled: func(shardId uint32, nonce uint64, lastNotarizedMeta uint64) { - fmt.Printf("add header to fork detector called") + fmt.Printf("add header to fork detector called\n") }, } diff --git a/process/sync/testMetaBootstrap.go b/process/sync/testMetaBootstrap.go new file mode 100644 index 00000000000..3cd493ae6e4 --- /dev/null +++ b/process/sync/testMetaBootstrap.go @@ -0,0 +1,24 @@ +package sync + +// TestMetaBootstrap extends MetaBootstrap and is used in integration tests as it exposes some funcs +// that are not supposed to be used in production code +// Exported funcs simplify the reproduction of edge cases +type TestMetaBootstrap struct { + *MetaBootstrap +} + +// ForkChoice decides to call (or not) the rollback on the current block from the blockchain structure +func (tmb *TestMetaBootstrap) ForkChoice(revertUsingForkNonce bool) error { + return tmb.forkChoice(revertUsingForkNonce) +} + +// SetProbableHighestNonce sets the probable highest nonce in the contained fork detector +func (tmb *TestMetaBootstrap) SetProbableHighestNonce(nonce uint64) { + forkDetector, ok := tmb.forkDetector.(*metaForkDetector) + if !ok { + log.Error("inner forkdetector impl is not of type metaForkDetector") + return + } + + forkDetector.setProbableHighestNonce(nonce) +} diff --git a/process/sync/testShardBootstrap.go b/process/sync/testShardBootstrap.go new file mode 100644 index 00000000000..f19556b8df6 --- /dev/null +++ b/process/sync/testShardBootstrap.go @@ -0,0 +1,24 @@ +package sync + +// TestShardBootstrap extends ShardBootstrap and is used in integration tests as it exposes some funcs +// that are not supposed to be used in production code +// Exported funcs simplify the reproduction of edge cases +type TestShardBootstrap struct { + *ShardBootstrap +} + +// ForkChoice decides to call (or not) the rollback on the current block from the blockchain structure +func (tsb *TestShardBootstrap) ForkChoice(revertUsingForkNonce bool) error { + return tsb.forkChoice(revertUsingForkNonce) +} + +// SetProbableHighestNonce sets the probable highest nonce in the contained fork detector +func (tsb *TestShardBootstrap) SetProbableHighestNonce(nonce uint64) { + forkDetector, ok := tsb.forkDetector.(*shardForkDetector) + if !ok { + log.Error("inner forkdetector impl is not of type shardForkDetector") + return + } + + forkDetector.setProbableHighestNonce(nonce) +} diff --git a/process/throttle/block.go b/process/throttle/block.go index ed7b51d2e64..f5f37147c98 100644 --- a/process/throttle/block.go +++ b/process/throttle/block.go @@ -112,7 +112,7 @@ func (bst *blockSizeThrottle) getMaxItemsWhenSucceed(lastActionMaxItems uint32) return noOfMaxItemsUsedWithoutSucceed } - increasedNoOfItems := core.Max(1, uint32(float32(noOfMaxItemsUsedWithoutSucceed-lastActionMaxItems)*jumpAboveFactor)) + increasedNoOfItems := core.MaxUint32(1, 
uint32(float32(noOfMaxItemsUsedWithoutSucceed-lastActionMaxItems)*jumpAboveFactor)) return lastActionMaxItems + increasedNoOfItems } @@ -136,7 +136,7 @@ func (bst *blockSizeThrottle) getMaxItemsWhenNotSucceed(lastActionMaxItems uint3 return noOfMaxItemsUsedWithSucceed } - decreasedNoOfItems := core.Max(1, uint32(float32(lastActionMaxItems-noOfMaxItemsUsedWithSucceed)*jumpBelowFactor)) + decreasedNoOfItems := core.MaxUint32(1, uint32(float32(lastActionMaxItems-noOfMaxItemsUsedWithSucceed)*jumpBelowFactor)) return lastActionMaxItems - decreasedNoOfItems } diff --git a/process/throttle/block_test.go b/process/throttle/block_test.go index 4ebf7039e3d..de7527c3680 100644 --- a/process/throttle/block_test.go +++ b/process/throttle/block_test.go @@ -3,6 +3,7 @@ package throttle_test import ( "testing" + "github.com/ElrondNetwork/elrond-go/core" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/throttle" "github.com/stretchr/testify/assert" @@ -133,7 +134,7 @@ func TestBlockSizeThrottle_ComputeMaxItemsShouldSetMaxItemsToMinItemsInBlockWhen func TestBlockSizeThrottle_ComputeMaxItemsShouldSetMaxItemsToADecreasedValueWhenLastActionNotSucceed(t *testing.T) { bst, _ := throttle.NewBlockSizeThrottle() - lastActionMaxItems1 := uint32(12000) + lastActionMaxItems1 := core.MaxUint32(12000, process.MinItemsInBlock) bst.SetMaxItems(lastActionMaxItems1) bst.Add(2, 0) bst.SetSucceed(2, false) @@ -142,7 +143,7 @@ func TestBlockSizeThrottle_ComputeMaxItemsShouldSetMaxItemsToADecreasedValueWhen assert.Equal(t, decreasedValue, bst.MaxItemsToAdd()) bst.SetSucceed(2, true) - lastActionMaxItems2 := uint32(14000) + lastActionMaxItems2 := core.MaxUint32(14000, process.MinItemsInBlock) bst.SetMaxItems(lastActionMaxItems2) bst.Add(3, 0) bst.SetSucceed(3, false) @@ -178,12 +179,12 @@ func TestBlockSizeThrottle_GetMaxItemsWhenSucceedShouldReturnNoOfMaxItemsUsedWit func TestBlockSizeThrottle_GetMaxItemsWhenSucceedShouldIncreaseMaxItemsWithAtLeastOneUnit(t *testing.T) { bst, _ := throttle.NewBlockSizeThrottle() - maxItemsUsedWithoutSucceed := uint32(process.MinItemsInBlock + 1) + maxItemsUsedWithoutSucceed := core.MinUint32(process.MinItemsInBlock+1, process.MaxItemsInBlock) bst.SetMaxItems(maxItemsUsedWithoutSucceed) bst.Add(2, 0) maxItemsWhenSucceed := bst.GetMaxItemsWhenSucceed(process.MinItemsInBlock) - assert.Equal(t, uint32(process.MinItemsInBlock+1), maxItemsWhenSucceed) + assert.Equal(t, core.MinUint32(process.MinItemsInBlock+1, process.MaxItemsInBlock), maxItemsWhenSucceed) } func TestBlockSizeThrottle_GetMaxItemsWhenSucceedShouldIncreaseMaxItems(t *testing.T) { @@ -257,7 +258,7 @@ func TestBlockSizeThrottle_GetMaxItemsWhenNotSucceedShouldDecreaseMaxItemsWithAt func TestBlockSizeThrottle_GetMaxItemsWhenNotSucceedShouldDecreaseMaxItems(t *testing.T) { bst, _ := throttle.NewBlockSizeThrottle() - maxItemsUsedWithSucceed := uint32(7000) + maxItemsUsedWithSucceed := core.MaxUint32(7000, process.MinItemsInBlock) bst.SetMaxItems(maxItemsUsedWithSucceed) bst.Add(2, 0) bst.SetSucceed(2, true) diff --git a/process/track/metaBlock.go b/process/track/metaBlock.go deleted file mode 100644 index 2df33492a8c..00000000000 --- a/process/track/metaBlock.go +++ /dev/null @@ -1,46 +0,0 @@ -package track - -import ( - "github.com/ElrondNetwork/elrond-go/data" -) - -// metaBlockTracker implements NotarisedBlocksTracker interface which tracks notarised blocks -type metaBlockTracker struct { -} - -// NewMetaBlockTracker creates a new metaBlockTracker object -func NewMetaBlockTracker() 
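The throttle changes above only rename core.Max to core.MaxUint32, but the surrounding step logic is worth a quick sketch: the item ceiling moves a fraction of the way towards a known bound, and clamping the step with MaxUint32(1, ...) guarantees it moves by at least one item even when the fractional step truncates to zero. The factor value below is a placeholder, not the package's real constant.

package main

import "fmt"

func maxUint32(a, b uint32) uint32 {
	if a > b {
		return a
	}
	return b
}

// nextCeiling sketches one throttle adjustment: move from the last attempted
// item count a fraction of the way towards the bound, but always by at least
// one item so progress never stalls.
func nextCeiling(lastAttempt, bound uint32, jumpFactor float32) uint32 {
	step := maxUint32(1, uint32(float32(bound-lastAttempt)*jumpFactor))
	return lastAttempt + step
}

func main() {
	fmt.Println(nextCeiling(14000, 14010, 0.5)) // 14005
	fmt.Println(nextCeiling(14000, 14001, 0.5)) // 14001: the fractional step is clamped to 1
}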
(*metaBlockTracker, error) { - mbt := metaBlockTracker{} - return &mbt, nil -} - -// UnnotarisedBlocks gets all the blocks which are not notarised yet -func (mbt *metaBlockTracker) UnnotarisedBlocks() []data.HeaderHandler { - return make([]data.HeaderHandler, 0) -} - -// RemoveNotarisedBlocks removes all the blocks which already have been notarised -func (mbt *metaBlockTracker) RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error { - return nil -} - -// AddBlock adds new block to be tracked -func (mbt *metaBlockTracker) AddBlock(headerHandler data.HeaderHandler) { -} - -// SetBlockBroadcastRound sets the round in which the block with the given nonce has been broadcast -func (mbt *metaBlockTracker) SetBlockBroadcastRound(nonce uint64, round int64) { -} - -// BlockBroadcastRound gets the round in which the block with given nonce has been broadcast -func (mbt *metaBlockTracker) BlockBroadcastRound(nonce uint64) int64 { - return 0 -} - -// IsInterfaceNil returns true if there is no value under the interface -func (mbt *metaBlockTracker) IsInterfaceNil() bool { - if mbt == nil { - return true - } - return false -} diff --git a/process/track/metaBlock_test.go b/process/track/metaBlock_test.go deleted file mode 100644 index ec9d21fabe2..00000000000 --- a/process/track/metaBlock_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package track_test - -import ( - "testing" - - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/process/track" - "github.com/stretchr/testify/assert" -) - -func TestMetaBlockTracker_NewMetaBlockTrackerShouldWork(t *testing.T) { - t.Parallel() - - mbt, err := track.NewMetaBlockTracker() - assert.Nil(t, err) - assert.NotNil(t, mbt) -} - -func TestMetaBlockTracker_UnnotarisedBlocksShouldWork(t *testing.T) { - t.Parallel() - - mbt, _ := track.NewMetaBlockTracker() - headers := mbt.UnnotarisedBlocks() - assert.Equal(t, make([]data.HeaderHandler, 0), headers) -} - -func TestMetaBlockTracker_BlockBroadcastRoundShouldWork(t *testing.T) { - t.Parallel() - - mbt, _ := track.NewMetaBlockTracker() - assert.Equal(t, int64(0), mbt.BlockBroadcastRound(1)) -} - -func TestMetaBlockTracker_RemoveNotarisedBlocksShouldWork(t *testing.T) { - t.Parallel() - - mbt, _ := track.NewMetaBlockTracker() - err := mbt.RemoveNotarisedBlocks(&block.MetaBlock{}) - assert.Nil(t, err) -} diff --git a/process/track/shardBlock.go b/process/track/shardBlock.go deleted file mode 100644 index 8eb0d9ecdc1..00000000000 --- a/process/track/shardBlock.go +++ /dev/null @@ -1,170 +0,0 @@ -package track - -import ( - "fmt" - "sync" - - "github.com/ElrondNetwork/elrond-go/core" - "github.com/ElrondNetwork/elrond-go/core/logger" - "github.com/ElrondNetwork/elrond-go/data" - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/dataRetriever" - "github.com/ElrondNetwork/elrond-go/marshal" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/sharding" -) - -var log = logger.DefaultLogger() - -type headerInfo struct { - header data.HeaderHandler - broadcastInRound int64 -} - -// shardBlockTracker implements NotarisedBlocksTracker interface which tracks notarised blocks -type shardBlockTracker struct { - dataPool dataRetriever.PoolsHolder - marshalizer marshal.Marshalizer - shardCoordinator sharding.Coordinator - store dataRetriever.StorageService - - mutUnnotarisedHeaders sync.RWMutex - unnotarisedHeaders map[uint64]*headerInfo -} - -// NewShardBlockTracker creates a new 
shardBlockTracker object -func NewShardBlockTracker( - dataPool dataRetriever.PoolsHolder, - marshalizer marshal.Marshalizer, - shardCoordinator sharding.Coordinator, - store dataRetriever.StorageService, -) (*shardBlockTracker, error) { - err := checkTrackerNilParameters( - dataPool, - marshalizer, - shardCoordinator, - store) - if err != nil { - return nil, err - } - - sbt := shardBlockTracker{ - dataPool: dataPool, - marshalizer: marshalizer, - shardCoordinator: shardCoordinator, - store: store, - } - - sbt.unnotarisedHeaders = make(map[uint64]*headerInfo) - - return &sbt, nil -} - -// checkTrackerNilParameters will check the imput parameters for nil values -func checkTrackerNilParameters( - dataPool dataRetriever.PoolsHolder, - marshalizer marshal.Marshalizer, - shardCoordinator sharding.Coordinator, - store dataRetriever.StorageService, -) error { - if dataPool == nil || dataPool.IsInterfaceNil() { - return process.ErrNilDataPoolHolder - } - if marshalizer == nil || marshalizer.IsInterfaceNil() { - return process.ErrNilMarshalizer - } - if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { - return process.ErrNilShardCoordinator - } - if store == nil || store.IsInterfaceNil() { - return process.ErrNilStorage - } - - return nil -} - -// AddBlock adds new block to be tracked -func (sbt *shardBlockTracker) AddBlock(headerHandler data.HeaderHandler) { - sbt.mutUnnotarisedHeaders.Lock() - sbt.unnotarisedHeaders[headerHandler.GetNonce()] = &headerInfo{header: headerHandler, broadcastInRound: 0} - sbt.mutUnnotarisedHeaders.Unlock() -} - -// RemoveNotarisedBlocks removes all the blocks which already have been notarised -func (sbt *shardBlockTracker) RemoveNotarisedBlocks(headerHandler data.HeaderHandler) error { - metaBlock, ok := headerHandler.(*block.MetaBlock) - if !ok { - return process.ErrWrongTypeAssertion - } - - for _, shardData := range metaBlock.ShardInfo { - if shardData.ShardId != sbt.shardCoordinator.SelfId() { - continue - } - - header, err := process.GetShardHeaderFromPool( - shardData.HeaderHash, - sbt.dataPool.Headers()) - if err != nil { - continue - } - - sbt.mutUnnotarisedHeaders.Lock() - delete(sbt.unnotarisedHeaders, header.Nonce) - sbt.mutUnnotarisedHeaders.Unlock() - - log.Debug(fmt.Sprintf("shardBlock with nonce %d and hash %s has been notarised by metachain\n", - header.GetNonce(), - core.ToB64(shardData.HeaderHash))) - } - - return nil -} - -// UnnotarisedBlocks gets all the blocks which are not notarised yet -func (sbt *shardBlockTracker) UnnotarisedBlocks() []data.HeaderHandler { - sbt.mutUnnotarisedHeaders.RLock() - - hdrs := make([]data.HeaderHandler, 0) - for _, hInfo := range sbt.unnotarisedHeaders { - hdrs = append(hdrs, hInfo.header) - } - - sbt.mutUnnotarisedHeaders.RUnlock() - - return hdrs -} - -// SetBlockBroadcastRound sets the round in which the block with the given nonce has been broadcast -func (sbt *shardBlockTracker) SetBlockBroadcastRound(nonce uint64, round int64) { - sbt.mutUnnotarisedHeaders.Lock() - - hInfo := sbt.unnotarisedHeaders[nonce] - if hInfo != nil { - hInfo.broadcastInRound = round - sbt.unnotarisedHeaders[nonce] = hInfo - } - - sbt.mutUnnotarisedHeaders.Unlock() -} - -// BlockBroadcastRound gets the round in which the block with given nonce has been broadcast -func (sbt *shardBlockTracker) BlockBroadcastRound(nonce uint64) int64 { - sbt.mutUnnotarisedHeaders.RLock() - hInfo := sbt.unnotarisedHeaders[nonce] - sbt.mutUnnotarisedHeaders.RUnlock() - - if hInfo == nil { - return 0 - } - - return hInfo.broadcastInRound -} - 
-// IsInterfaceNil returns true if there is no value under the interface -func (sbt *shardBlockTracker) IsInterfaceNil() bool { - if sbt == nil { - return true - } - return false -} diff --git a/process/track/shardBlock_test.go b/process/track/shardBlock_test.go deleted file mode 100644 index cb51c1c8802..00000000000 --- a/process/track/shardBlock_test.go +++ /dev/null @@ -1,206 +0,0 @@ -package track_test - -import ( - "testing" - - "github.com/ElrondNetwork/elrond-go/data/block" - "github.com/ElrondNetwork/elrond-go/process" - "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/ElrondNetwork/elrond-go/process/track" - "github.com/ElrondNetwork/elrond-go/storage" - "github.com/stretchr/testify/assert" -) - -func TestNewShardBlockTracker_NilDataPoolShouldErr(t *testing.T) { - t.Parallel() - - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, err := track.NewShardBlockTracker(nil, marshalizer, shardCoordinator, store) - assert.Nil(t, mbt) - assert.Equal(t, process.ErrNilDataPoolHolder, err) -} - -func TestNewShardBlockTracker_NilMarshalizerShouldErr(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, err := track.NewShardBlockTracker(pools, nil, shardCoordinator, store) - assert.Nil(t, mbt) - assert.Equal(t, process.ErrNilMarshalizer, err) -} - -func TestNewShardBlockTracker_NilShardCoordinatorShouldErr(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{} - marshalizer := &mock.MarshalizerMock{} - store := &mock.ChainStorerMock{} - - mbt, err := track.NewShardBlockTracker(pools, marshalizer, nil, store) - assert.Nil(t, mbt) - assert.Equal(t, process.ErrNilShardCoordinator, err) -} - -func TestNewShardBlockTracker_NilStoreShouldErr(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{} - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - - mbt, err := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, nil) - assert.Nil(t, mbt) - assert.Equal(t, process.ErrNilStorage, err) -} - -func TestNewShardBlockTracker_OkValsShouldWork(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{} - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, err := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) - assert.Nil(t, err) - assert.NotNil(t, mbt) -} - -func TestShardBlockTracker_AddBlockShouldWork(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{} - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) - hdr1 := &block.Header{Nonce: 2} - mbt.AddBlock(hdr1) - hdr2 := &block.Header{Nonce: 3} - mbt.AddBlock(hdr2) - headers := mbt.UnnotarisedBlocks() - assert.Equal(t, 2, len(headers)) -} - -func TestShardBlockTracker_SetBlockBroadcastRoundShoudNotSetRoundWhenNonceDoesNotExist(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{} - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) - hdr := &block.Header{Nonce: 2} - mbt.AddBlock(hdr) - 
mbt.SetBlockBroadcastRound(1, 10) - assert.Equal(t, int64(0), mbt.BlockBroadcastRound(1)) -} - -func TestShardBlockTracker_SetBlockBroadcastRoundShoudSetRound(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{} - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) - hdr := &block.Header{Nonce: 2} - mbt.AddBlock(hdr) - mbt.SetBlockBroadcastRound(2, 10) - assert.Equal(t, int64(10), mbt.BlockBroadcastRound(2)) -} - -func TestShardBlockTracker_RemoveNotarisedBlocksShouldErrWrongTypeAssertion(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{} - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) - err := mbt.RemoveNotarisedBlocks(nil) - assert.Equal(t, process.ErrWrongTypeAssertion, err) -} - -func TestShardBlockTracker_RemoveNotarisedBlocksShouldNotRemoveIfShardIdIsNotSelf(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{} - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) - metaBlock := &block.MetaBlock{} - shardInfo := make([]block.ShardData, 0) - sd := block.ShardData{ShardId: 1, HeaderHash: []byte("1")} - shardInfo = append(shardInfo, sd) - metaBlock.ShardInfo = shardInfo - header := &block.Header{Nonce: 1} - mbt.AddBlock(header) - _ = mbt.RemoveNotarisedBlocks(metaBlock) - assert.Equal(t, 1, len(mbt.UnnotarisedBlocks())) -} - -func TestShardBlockTracker_RemoveNotarisedBlocksShouldNotRemoveIfGetShardHeaderErr(t *testing.T) { - t.Parallel() - - pools := &mock.PoolsHolderStub{ - HeadersCalled: func() storage.Cacher { - return nil - }, - } - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) - metaBlock := &block.MetaBlock{} - shardInfo := make([]block.ShardData, 0) - sd := block.ShardData{ShardId: 0, HeaderHash: []byte("1")} - shardInfo = append(shardInfo, sd) - metaBlock.ShardInfo = shardInfo - header := &block.Header{Nonce: 1} - mbt.AddBlock(header) - _ = mbt.RemoveNotarisedBlocks(metaBlock) - assert.Equal(t, 1, len(mbt.UnnotarisedBlocks())) -} - -func TestShardBlockTracker_RemoveNotarisedBlocksShouldWork(t *testing.T) { - t.Parallel() - - header := &block.Header{Nonce: 1} - - pools := &mock.PoolsHolderStub{ - HeadersCalled: func() storage.Cacher { - return &mock.CacherStub{ - PeekCalled: func(key []byte) (value interface{}, ok bool) { - return header, true - }, - } - }, - } - marshalizer := &mock.MarshalizerMock{} - shardCoordinator := mock.NewOneShardCoordinatorMock() - store := &mock.ChainStorerMock{} - - mbt, _ := track.NewShardBlockTracker(pools, marshalizer, shardCoordinator, store) - metaBlock := &block.MetaBlock{} - shardInfo := make([]block.ShardData, 0) - sd := block.ShardData{ShardId: 0, HeaderHash: []byte("1")} - shardInfo = append(shardInfo, sd) - metaBlock.ShardInfo = shardInfo - mbt.AddBlock(header) - _ = mbt.RemoveNotarisedBlocks(metaBlock) - assert.Equal(t, 0, len(mbt.UnnotarisedBlocks())) -} diff --git a/process/transaction/interceptedTransaction.go 
b/process/transaction/interceptedTransaction.go index 46710308707..7e5e3782060 100644 --- a/process/transaction/interceptedTransaction.go +++ b/process/transaction/interceptedTransaction.go @@ -27,6 +27,7 @@ type InterceptedTransaction struct { sndShard uint32 isAddressedToOtherShards bool sndAddr state.AddressContainer + feeHandler process.FeeHandler } // NewInterceptedTransaction returns a new instance of InterceptedTransaction @@ -38,6 +39,7 @@ func NewInterceptedTransaction( signer crypto.SingleSigner, addrConv state.AddressConverter, coordinator sharding.Coordinator, + feeHandler process.FeeHandler, ) (*InterceptedTransaction, error) { if txBuff == nil { @@ -61,6 +63,9 @@ func NewInterceptedTransaction( if coordinator == nil || coordinator.IsInterfaceNil() { return nil, process.ErrNilShardCoordinator } + if feeHandler == nil || coordinator.IsInterfaceNil() { + return nil, process.ErrNilEconomicsFeeHandler + } tx := &transaction.Transaction{} err := marshalizer.Unmarshal(tx, txBuff) @@ -76,6 +81,7 @@ func NewInterceptedTransaction( addrConv: addrConv, keyGen: keyGen, coordinator: coordinator, + feeHandler: feeHandler, } txBuffWithoutSig, err := inTx.processFields(txBuff) @@ -133,23 +139,33 @@ func (inTx *InterceptedTransaction) integrity() error { if inTx.tx.Signature == nil { return process.ErrNilSignature } - if inTx.tx.RcvAddr == nil { return process.ErrNilRcvAddr } - if inTx.tx.SndAddr == nil { return process.ErrNilSndAddr } - if inTx.tx.Value == nil { return process.ErrNilValue } - if inTx.tx.Value.Cmp(big.NewInt(0)) < 0 { return process.ErrNegativeValue } + return inTx.checkFeeValues() +} + +func (inTx *InterceptedTransaction) checkFeeValues() error { + isLowerGasLimitInTx := inTx.tx.GasLimit < inTx.feeHandler.MinGasLimit() + if isLowerGasLimitInTx { + return process.ErrInsufficientGasLimitInTx + } + + isLowerGasPrice := inTx.tx.GasPrice < inTx.feeHandler.MinGasPrice() + if isLowerGasPrice { + return process.ErrInsufficientGasPriceInTx + } + return nil } diff --git a/process/transaction/interceptedTransaction_test.go b/process/transaction/interceptedTransaction_test.go index 6040c5f6f3f..e4346da84a5 100644 --- a/process/transaction/interceptedTransaction_test.go +++ b/process/transaction/interceptedTransaction_test.go @@ -47,7 +47,24 @@ func createKeyGenMock() crypto.KeyGenerator { } } -func createInterceptedTxFromPlainTx(tx *dataTransaction.Transaction) (*transaction.InterceptedTransaction, error) { +func createTxFeeHandler(gasPrice uint64, gasLimit uint64) process.FeeHandler { + feeHandler := &mock.FeeHandlerStub{ + MinGasPriceCalled: func() uint64 { + return gasPrice + }, + MinGasLimitCalled: func() uint64 { + return gasLimit + }, + } + + return feeHandler +} + +func createFreeTxFeeHandler() process.FeeHandler { + return createTxFeeHandler(0, 0) +} + +func createInterceptedTxFromPlainTx(tx *dataTransaction.Transaction, txFeeHandler process.FeeHandler) (*transaction.InterceptedTransaction, error) { marshalizer := &mock.MarshalizerMock{} txBuff, _ := marshalizer.Marshal(tx) @@ -76,6 +93,7 @@ func createInterceptedTxFromPlainTx(tx *dataTransaction.Transaction) (*transacti }, }, shardCoordinator, + txFeeHandler, ) } @@ -90,6 +108,7 @@ func TestNewInterceptedTransaction_NilBufferShouldErr(t *testing.T) { &mock.SignerMock{}, &mock.AddressConverterMock{}, mock.NewOneShardCoordinatorMock(), + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) @@ -107,6 +126,7 @@ func TestNewInterceptedTransaction_NilMarshalizerShouldErr(t *testing.T) { &mock.SignerMock{}, &mock.AddressConverterMock{}, 
mock.NewOneShardCoordinatorMock(), + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) @@ -124,6 +144,7 @@ func TestNewInterceptedTransaction_NilHasherShouldErr(t *testing.T) { &mock.SignerMock{}, &mock.AddressConverterMock{}, mock.NewOneShardCoordinatorMock(), + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) @@ -141,6 +162,7 @@ func TestNewInterceptedTransaction_NilKeyGenShouldErr(t *testing.T) { &mock.SignerMock{}, &mock.AddressConverterMock{}, mock.NewOneShardCoordinatorMock(), + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) @@ -158,6 +180,7 @@ func TestNewInterceptedTransaction_NilSignerShouldErr(t *testing.T) { nil, &mock.AddressConverterMock{}, mock.NewOneShardCoordinatorMock(), + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) @@ -175,6 +198,7 @@ func TestNewInterceptedTransaction_NilAddressConverterShouldErr(t *testing.T) { &mock.SignerMock{}, nil, mock.NewOneShardCoordinatorMock(), + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) @@ -192,12 +216,31 @@ func TestNewInterceptedTransaction_NilCoordinatorShouldErr(t *testing.T) { &mock.SignerMock{}, &mock.AddressConverterMock{}, nil, + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) assert.Equal(t, process.ErrNilShardCoordinator, err) } +func TestNewInterceptedTransaction_NilFeeHandlerShouldErr(t *testing.T) { + t.Parallel() + + txi, err := transaction.NewInterceptedTransaction( + make([]byte, 0), + &mock.MarshalizerMock{}, + mock.HasherMock{}, + &mock.SingleSignKeyGenMock{}, + &mock.SignerMock{}, + &mock.AddressConverterMock{}, + mock.NewOneShardCoordinatorMock(), + nil, + ) + + assert.Nil(t, txi) + assert.Equal(t, process.ErrNilEconomicsFeeHandler, err) +} + func TestNewInterceptedTransaction_UnmarshalingTxFailsShouldErr(t *testing.T) { t.Parallel() @@ -215,6 +258,7 @@ func TestNewInterceptedTransaction_UnmarshalingTxFailsShouldErr(t *testing.T) { &mock.SignerMock{}, &mock.AddressConverterMock{}, mock.NewOneShardCoordinatorMock(), + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) @@ -241,6 +285,7 @@ func TestNewInterceptedTransaction_MarshalingCopiedTxFailsShouldErr(t *testing.T &mock.SignerMock{}, &mock.AddressConverterMock{}, mock.NewOneShardCoordinatorMock(), + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) @@ -262,6 +307,7 @@ func TestNewInterceptedTransaction_AddrConvFailsShouldErr(t *testing.T) { }, }, mock.NewOneShardCoordinatorMock(), + &mock.FeeHandlerStub{}, ) assert.Nil(t, txi) @@ -282,7 +328,7 @@ func TestNewInterceptedTransaction_NilSignatureShouldErr(t *testing.T) { Signature: nil, } - txi, err := createInterceptedTxFromPlainTx(tx) + txi, err := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) assert.Nil(t, txi) assert.Equal(t, process.ErrNilSignature, err) @@ -302,7 +348,7 @@ func TestNewInterceptedTransaction_NilSenderAddressShouldErr(t *testing.T) { Signature: sigOk, } - txi, err := createInterceptedTxFromPlainTx(tx) + txi, err := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) assert.Nil(t, txi) assert.Equal(t, process.ErrNilSndAddr, err) @@ -322,7 +368,7 @@ func TestNewInterceptedTransaction_NilRecvAddressShouldErr(t *testing.T) { Signature: sigOk, } - txi, err := createInterceptedTxFromPlainTx(tx) + txi, err := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) assert.Nil(t, txi) assert.Equal(t, process.ErrNilRcvAddr, err) @@ -342,7 +388,7 @@ func TestNewInterceptedTransaction_NilValueShouldErr(t *testing.T) { Signature: sigOk, } - txi, err := createInterceptedTxFromPlainTx(tx) + txi, err := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) assert.Nil(t, txi) assert.Equal(t, 
process.ErrNilValue, err) @@ -362,7 +408,7 @@ func TestNewInterceptedTransaction_NilNegativeValueShouldErr(t *testing.T) { Signature: sigOk, } - txi, err := createInterceptedTxFromPlainTx(tx) + txi, err := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) assert.Nil(t, txi) assert.Equal(t, process.ErrNegativeValue, err) @@ -382,12 +428,58 @@ func TestNewInterceptedTransaction_InvalidSenderShouldErr(t *testing.T) { Signature: sigOk, } - txi, err := createInterceptedTxFromPlainTx(tx) + txi, err := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) assert.Nil(t, txi) assert.Equal(t, errSingleSignKeyGenMock, err) } +func TestNewInterceptedTransaction_InsufficientGasPriceShouldErr(t *testing.T) { + t.Parallel() + + gasLimit := uint64(3) + gasPrice := uint64(4) + tx := &dataTransaction.Transaction{ + Nonce: 1, + Value: big.NewInt(2), + Data: "data", + GasLimit: gasLimit, + GasPrice: gasPrice, + RcvAddr: recvAddress, + SndAddr: []byte(""), + Signature: sigOk, + } + feeHandler := createTxFeeHandler(gasPrice+1, gasLimit) + + txi, err := createInterceptedTxFromPlainTx(tx, feeHandler) + + assert.Nil(t, txi) + assert.Equal(t, process.ErrInsufficientGasPriceInTx, err) +} + +func TestNewInterceptedTransaction_InsufficientGasLimitShouldErr(t *testing.T) { + t.Parallel() + + gasLimit := uint64(3) + gasPrice := uint64(4) + tx := &dataTransaction.Transaction{ + Nonce: 1, + Value: big.NewInt(2), + Data: "data", + GasLimit: gasLimit, + GasPrice: gasPrice, + RcvAddr: recvAddress, + SndAddr: []byte(""), + Signature: sigOk, + } + feeHandler := createTxFeeHandler(gasPrice, gasLimit+1) + + txi, err := createInterceptedTxFromPlainTx(tx, feeHandler) + + assert.Nil(t, txi) + assert.Equal(t, process.ErrInsufficientGasLimitInTx, err) +} + func TestNewInterceptedTransaction_VerifyFailsShouldErr(t *testing.T) { t.Parallel() @@ -402,7 +494,7 @@ func TestNewInterceptedTransaction_VerifyFailsShouldErr(t *testing.T) { Signature: []byte("wrong sig"), } - txi, err := createInterceptedTxFromPlainTx(tx) + txi, err := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) assert.Nil(t, txi) assert.Equal(t, errSignerMockVerifySigFails, err) @@ -422,7 +514,7 @@ func TestNewInterceptedTransaction_ShouldWork(t *testing.T) { Signature: sigOk, } - txi, err := createInterceptedTxFromPlainTx(tx) + txi, err := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) assert.NotNil(t, txi) assert.Nil(t, err) @@ -443,7 +535,7 @@ func TestNewInterceptedTransaction_OkValsGettersShouldWork(t *testing.T) { Signature: sigOk, } - txi, _ := createInterceptedTxFromPlainTx(tx) + txi, _ := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) assert.Equal(t, senderShard, txi.SndShard()) assert.Equal(t, recvShard, txi.RcvShard()) @@ -497,6 +589,7 @@ func TestNewInterceptedTransaction_ScTxDeployRecvShardIdShouldBeSendersShardId(t }, }, shardCoordinator, + createFreeTxFeeHandler(), ) assert.Nil(t, err) @@ -520,7 +613,7 @@ func TestNewInterceptedTransaction_GetNonce(t *testing.T) { Signature: sigOk, } - txi, _ := createInterceptedTxFromPlainTx(tx) + txi, _ := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) result := txi.Nonce() assert.Equal(t, nonce, result) @@ -540,7 +633,7 @@ func TestNewInterceptedTransaction_SenderShardId(t *testing.T) { Signature: sigOk, } - txi, _ := createInterceptedTxFromPlainTx(tx) + txi, _ := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) result := txi.SenderShardId() assert.Equal(t, senderShard, result) @@ -568,7 +661,7 @@ func 
TestNewInterceptedTransaction_GetTotalValue(t *testing.T) { Signature: sigOk, } - txi, _ := createInterceptedTxFromPlainTx(tx) + txi, _ := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) result := txi.TotalValue() assert.Equal(t, expectedValue, result) @@ -588,7 +681,7 @@ func TestNewInterceptedTransaction_GetSenderAddress(t *testing.T) { Signature: sigOk, } - txi, _ := createInterceptedTxFromPlainTx(tx) + txi, _ := createInterceptedTxFromPlainTx(tx, createFreeTxFeeHandler()) result := txi.SenderAddress() assert.NotNil(t, result) diff --git a/process/transaction/interceptor.go b/process/transaction/interceptor.go index c7bfef185bb..21f3818d912 100644 --- a/process/transaction/interceptor.go +++ b/process/transaction/interceptor.go @@ -25,6 +25,8 @@ type TxInterceptor struct { keyGen crypto.KeyGenerator shardCoordinator sharding.Coordinator broadcastCallbackHandler func(buffToSend []byte) + throttler process.InterceptorThrottler + feeHandler process.FeeHandler } // NewTxInterceptor hooks a new interceptor for transactions @@ -37,6 +39,8 @@ func NewTxInterceptor( singleSigner crypto.SingleSigner, keyGen crypto.KeyGenerator, shardCoordinator sharding.Coordinator, + throttler process.InterceptorThrottler, + feeHandler process.FeeHandler, ) (*TxInterceptor, error) { if marshalizer == nil || marshalizer.IsInterfaceNil() { @@ -63,6 +67,12 @@ func NewTxInterceptor( if shardCoordinator == nil || shardCoordinator.IsInterfaceNil() { return nil, process.ErrNilShardCoordinator } + if throttler == nil || throttler.IsInterfaceNil() { + return nil, process.ErrNilThrottler + } + if feeHandler == nil || feeHandler.IsInterfaceNil() { + return nil, process.ErrNilEconomicsFeeHandler + } txIntercept := &TxInterceptor{ marshalizer: marshalizer, @@ -73,6 +83,8 @@ func NewTxInterceptor( singleSigner: singleSigner, keyGen: keyGen, shardCoordinator: shardCoordinator, + throttler: throttler, + feeHandler: feeHandler, } return txIntercept, nil @@ -81,10 +93,17 @@ func NewTxInterceptor( // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to) func (txi *TxInterceptor) ProcessReceivedMessage(message p2p.MessageP2P) error { + canProcess := txi.throttler.CanProcess() + if !canProcess { + return process.ErrSystemBusy + } + + txi.throttler.StartProcessing() + defer txi.throttler.EndProcessing() + if message == nil || message.IsInterfaceNil() { return process.ErrNilMessage } - if message.Data() == nil { return process.ErrNilDataToProcess } @@ -108,7 +127,9 @@ func (txi *TxInterceptor) ProcessReceivedMessage(message p2p.MessageP2P) error { txi.keyGen, txi.singleSigner, txi.addrConverter, - txi.shardCoordinator) + txi.shardCoordinator, + txi.feeHandler, + ) if err != nil { lastErrEncountered = err @@ -123,6 +144,7 @@ func (txi *TxInterceptor) ProcessReceivedMessage(message p2p.MessageP2P) error { continue } + //TODO: check if throttler needs to be applied also on the following go routine. 
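Before that goroutine is spawned, the reworked intercept path gates each message twice: the throttler decides whether the node has spare capacity (ErrSystemBusy otherwise), and the economics fee handler enforces floor values for gas limit and gas price through checkFeeValues. The sketch below shows that admission logic in isolation; minimalThrottler, txFields and admit are hypothetical names standing in for the process.InterceptorThrottler and process.FeeHandler plumbing, with plain atomic counters in place of the real throttler.

package main

import (
    "errors"
    "fmt"
    "sync/atomic"
)

var (
    errSystemBusy           = errors.New("system busy")
    errInsufficientGasLimit = errors.New("insufficient gas limit in tx")
    errInsufficientGasPrice = errors.New("insufficient gas price in tx")
)

// minimalThrottler caps how many messages are processed at the same time.
type minimalThrottler struct {
    inFlight int32
    max      int32
}

func (t *minimalThrottler) canProcess() bool { return atomic.LoadInt32(&t.inFlight) < t.max }
func (t *minimalThrottler) startProcessing() { atomic.AddInt32(&t.inFlight, 1) }
func (t *minimalThrottler) endProcessing()   { atomic.AddInt32(&t.inFlight, -1) }

// txFields keeps only the values the admission checks look at.
type txFields struct {
    gasPrice uint64
    gasLimit uint64
}

// admit mirrors the shape of ProcessReceivedMessage plus checkFeeValues:
// refuse when busy, otherwise reserve a processing slot and validate the fee floors.
func admit(throttler *minimalThrottler, tx txFields, minGasPrice uint64, minGasLimit uint64) error {
    if !throttler.canProcess() {
        return errSystemBusy
    }
    throttler.startProcessing()
    defer throttler.endProcessing()

    if tx.gasLimit < minGasLimit {
        return errInsufficientGasLimit
    }
    if tx.gasPrice < minGasPrice {
        return errInsufficientGasPrice
    }
    return nil
}

func main() {
    throttler := &minimalThrottler{max: 1}
    fmt.Println(admit(throttler, txFields{gasPrice: 10, gasLimit: 5}, 1, 5)) // <nil>
    fmt.Println(admit(throttler, txFields{gasPrice: 0, gasLimit: 5}, 1, 5))  // insufficient gas price in tx
}

The interceptor tests further down in this diff exercise exactly these branches: when CanProcessCalled returns false the call ends with ErrSystemBusy and neither StartProcessingCount nor EndProcessingCount moves, while every other path leaves both counters at 1 regardless of how validation turns out.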
go txi.processTransaction(txIntercepted) } diff --git a/process/transaction/interceptor_test.go b/process/transaction/interceptor_test.go index b67e6dee4f8..7d15f776f37 100644 --- a/process/transaction/interceptor_test.go +++ b/process/transaction/interceptor_test.go @@ -37,6 +37,7 @@ func TestNewTxInterceptor_NilMarshalizerShouldErr(t *testing.T) { oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{} txi, err := transaction.NewTxInterceptor( nil, @@ -46,7 +47,10 @@ func TestNewTxInterceptor_NilMarshalizerShouldErr(t *testing.T) { mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + &mock.FeeHandlerStub{}, + ) assert.Equal(t, process.ErrNilMarshalizer, err) assert.Nil(t, txi) @@ -60,6 +64,7 @@ func TestNewTxInterceptor_NilTransactionPoolShouldErr(t *testing.T) { oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{} txi, err := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -69,7 +74,10 @@ func TestNewTxInterceptor_NilTransactionPoolShouldErr(t *testing.T) { mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + &mock.FeeHandlerStub{}, + ) assert.Equal(t, process.ErrNilTxDataPool, err) assert.Nil(t, txi) @@ -83,6 +91,7 @@ func TestNewTxInterceptor_NilTxHandlerValidatorShouldErr(t *testing.T) { keyGen := &mock.SingleSignKeyGenMock{} oneSharder := mock.NewOneShardCoordinatorMock() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{} txi, err := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -92,7 +101,10 @@ func TestNewTxInterceptor_NilTxHandlerValidatorShouldErr(t *testing.T) { mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + &mock.FeeHandlerStub{}, + ) assert.Equal(t, process.ErrNilTxHandlerValidator, err) assert.Nil(t, txi) @@ -106,6 +118,7 @@ func TestNewTxInterceptor_NilAddressConverterShouldErr(t *testing.T) { oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{} txi, err := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -115,7 +128,10 @@ func TestNewTxInterceptor_NilAddressConverterShouldErr(t *testing.T) { mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + &mock.FeeHandlerStub{}, + ) assert.Equal(t, process.ErrNilAddressConverter, err) assert.Nil(t, txi) @@ -130,6 +146,7 @@ func TestNewTxInterceptor_NilHasherShouldErr(t *testing.T) { oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{} txi, err := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -139,7 +156,10 @@ func TestNewTxInterceptor_NilHasherShouldErr(t *testing.T) { nil, signer, keyGen, - oneSharder) + oneSharder, + throttler, + &mock.FeeHandlerStub{}, + ) assert.Equal(t, process.ErrNilHasher, err) assert.Nil(t, txi) @@ -153,6 +173,7 @@ func TestNewTxInterceptor_NilSignerShouldErr(t *testing.T) { keyGen := &mock.SingleSignKeyGenMock{} oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() + throttler := &mock.InterceptorThrottlerStub{} txi, err := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -162,7 +183,10 @@ func TestNewTxInterceptor_NilSignerShouldErr(t *testing.T) { 
mock.HasherMock{}, nil, keyGen, - oneSharder) + oneSharder, + throttler, + &mock.FeeHandlerStub{}, + ) assert.Equal(t, process.ErrNilSingleSigner, err) assert.Nil(t, txi) @@ -176,6 +200,7 @@ func TestNewTxInterceptor_NilKeyGenShouldErr(t *testing.T) { oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{} txi, err := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -185,7 +210,10 @@ func TestNewTxInterceptor_NilKeyGenShouldErr(t *testing.T) { mock.HasherMock{}, signer, nil, - oneSharder) + oneSharder, + throttler, + &mock.FeeHandlerStub{}, + ) assert.Equal(t, process.ErrNilKeyGen, err) assert.Nil(t, txi) @@ -199,6 +227,7 @@ func TestNewTxInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { keyGen := &mock.SingleSignKeyGenMock{} txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{} txi, err := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -208,12 +237,70 @@ func TestNewTxInterceptor_NilShardCoordinatorShouldErr(t *testing.T) { mock.HasherMock{}, signer, keyGen, - nil) + nil, + throttler, + &mock.FeeHandlerStub{}, + ) assert.Equal(t, process.ErrNilShardCoordinator, err) assert.Nil(t, txi) } +func TestNewTxInterceptor_NilThrottlerShouldErr(t *testing.T) { + t.Parallel() + + txPool := &mock.ShardedDataStub{} + addrConv := &mock.AddressConverterMock{} + keyGen := &mock.SingleSignKeyGenMock{} + txValidator := createMockedTxValidator() + signer := &mock.SignerMock{} + oneSharder := mock.NewOneShardCoordinatorMock() + + txi, err := transaction.NewTxInterceptor( + &mock.MarshalizerMock{}, + txPool, + txValidator, + addrConv, + mock.HasherMock{}, + signer, + keyGen, + oneSharder, + nil, + &mock.FeeHandlerStub{}, + ) + + assert.Equal(t, process.ErrNilThrottler, err) + assert.Nil(t, txi) +} + +func TestNewTxInterceptor_NilFeeHandlerShouldErr(t *testing.T) { + t.Parallel() + + txPool := &mock.ShardedDataStub{} + addrConv := &mock.AddressConverterMock{} + keyGen := &mock.SingleSignKeyGenMock{} + txValidator := createMockedTxValidator() + signer := &mock.SignerMock{} + oneSharder := mock.NewOneShardCoordinatorMock() + throttler := &mock.InterceptorThrottlerStub{} + + txi, err := transaction.NewTxInterceptor( + &mock.MarshalizerMock{}, + txPool, + txValidator, + addrConv, + mock.HasherMock{}, + signer, + keyGen, + oneSharder, + throttler, + nil, + ) + + assert.Equal(t, process.ErrNilEconomicsFeeHandler, err) + assert.Nil(t, txi) +} + func TestNewTxInterceptor_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -223,6 +310,7 @@ func TestNewTxInterceptor_OkValsShouldWork(t *testing.T) { oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{} txi, err := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -232,7 +320,10 @@ func TestNewTxInterceptor_OkValsShouldWork(t *testing.T) { mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + &mock.FeeHandlerStub{}, + ) assert.Nil(t, err) assert.NotNil(t, txi) @@ -240,6 +331,41 @@ func TestNewTxInterceptor_OkValsShouldWork(t *testing.T) { //------- ProcessReceivedMessage +func TestTransactionInterceptor_ProcessReceivedMessageSystemBusyShouldErr(t *testing.T) { + t.Parallel() + + txPool := &mock.ShardedDataStub{} + addrConv := &mock.AddressConverterMock{} + keyGen := &mock.SingleSignKeyGenMock{} + oneSharder := 
mock.NewOneShardCoordinatorMock() + txValidator := createMockedTxValidator() + signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return false + }, + } + + txi, _ := transaction.NewTxInterceptor( + &mock.MarshalizerMock{}, + txPool, + txValidator, + addrConv, + mock.HasherMock{}, + signer, + keyGen, + oneSharder, + throttler, + &mock.FeeHandlerStub{}, + ) + + err := txi.ProcessReceivedMessage(nil) + + assert.Equal(t, process.ErrSystemBusy, err) + assert.Equal(t, int32(0), throttler.StartProcessingCount()) + assert.Equal(t, int32(0), throttler.EndProcessingCount()) +} + func TestTransactionInterceptor_ProcessReceivedMessageNilMesssageShouldErr(t *testing.T) { t.Parallel() @@ -249,6 +375,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageNilMesssageShouldErr(t *te oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -258,11 +389,16 @@ func TestTransactionInterceptor_ProcessReceivedMessageNilMesssageShouldErr(t *te mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + &mock.FeeHandlerStub{}, + ) err := txi.ProcessReceivedMessage(nil) assert.Equal(t, process.ErrNilMessage, err) + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } func TestTransactionInterceptor_ProcessReceivedMessageMilMessageDataShouldErr(t *testing.T) { @@ -274,6 +410,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageMilMessageDataShouldErr(t oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( &mock.MarshalizerMock{}, @@ -283,13 +424,18 @@ func TestTransactionInterceptor_ProcessReceivedMessageMilMessageDataShouldErr(t mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + &mock.FeeHandlerStub{}, + ) msg := &mock.P2PMessageMock{} err := txi.ProcessReceivedMessage(msg) assert.Equal(t, process.ErrNilDataToProcess, err) + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } func TestTransactionInterceptor_ProcessReceivedMessageMarshalizerFailsAtUnmarshalingShouldErr(t *testing.T) { @@ -303,6 +449,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageMarshalizerFailsAtUnmarsha oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( &mock.MarshalizerStub{ @@ -316,7 +467,10 @@ func TestTransactionInterceptor_ProcessReceivedMessageMarshalizerFailsAtUnmarsha mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + &mock.FeeHandlerStub{}, + ) msg := &mock.P2PMessageMock{ DataField: make([]byte, 0), @@ -325,6 +479,8 @@ func TestTransactionInterceptor_ProcessReceivedMessageMarshalizerFailsAtUnmarsha err := txi.ProcessReceivedMessage(msg) assert.Equal(t, errMarshalizer, err) + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } func 
TestTransactionInterceptor_ProcessReceivedMessageNoTransactionInMessageShouldErr(t *testing.T) { @@ -336,6 +492,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageNoTransactionInMessageShou oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( &mock.MarshalizerStub{ @@ -352,7 +513,10 @@ func TestTransactionInterceptor_ProcessReceivedMessageNoTransactionInMessageShou mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + &mock.FeeHandlerStub{}, + ) msg := &mock.P2PMessageMock{ DataField: make([]byte, 0), @@ -361,6 +525,8 @@ func TestTransactionInterceptor_ProcessReceivedMessageNoTransactionInMessageShou err := txi.ProcessReceivedMessage(msg) assert.Equal(t, process.ErrNoTransactionInMessage, err) + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } func TestTransactionInterceptor_ProcessReceivedMessageIntegrityFailedShouldErr(t *testing.T) { @@ -374,6 +540,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageIntegrityFailedShouldErr(t oneSharder := mock.NewOneShardCoordinatorMock() txValidator := createMockedTxValidator() signer := &mock.SignerMock{} + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( marshalizer, @@ -383,7 +554,10 @@ func TestTransactionInterceptor_ProcessReceivedMessageIntegrityFailedShouldErr(t mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + &mock.FeeHandlerStub{}, + ) txNewer := &dataTransaction.Transaction{ Nonce: 1, @@ -405,6 +579,8 @@ func TestTransactionInterceptor_ProcessReceivedMessageIntegrityFailedShouldErr(t err := txi.ProcessReceivedMessage(msg) assert.Equal(t, process.ErrNilSignature, err) + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } func TestTransactionInterceptor_ProcessReceivedMessageIntegrityFailedWithTwoTxsShouldErrAndFilter(t *testing.T) { @@ -432,6 +608,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageIntegrityFailedWithTwoTxsS return nil }, } + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( marshalizer, @@ -441,7 +622,10 @@ func TestTransactionInterceptor_ProcessReceivedMessageIntegrityFailedWithTwoTxsS mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + createFreeTxFeeHandler(), + ) tx1 := &dataTransaction.Transaction{ Nonce: 1, @@ -485,6 +669,8 @@ func TestTransactionInterceptor_ProcessReceivedMessageIntegrityFailedWithTwoTxsS txRecovered := &dataTransaction.Transaction{} _ = marshalizer.Unmarshal(txRecovered, txBuffRecovered[0]) assert.Equal(t, tx2, txRecovered) + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } func TestTransactionInterceptor_ProcessReceivedMessageVerifySigFailsShouldErr(t *testing.T) { @@ -509,6 +695,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageVerifySigFailsShouldErr(t return errExpected }, } + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( marshalizer, @@ -518,7 +709,10 @@ func 
TestTransactionInterceptor_ProcessReceivedMessageVerifySigFailsShouldErr(t mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + createFreeTxFeeHandler(), + ) txNewer := &dataTransaction.Transaction{ Nonce: 1, @@ -540,6 +734,8 @@ func TestTransactionInterceptor_ProcessReceivedMessageVerifySigFailsShouldErr(t err := txi.ProcessReceivedMessage(msg) assert.Equal(t, errExpected, err) + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } func TestTransactionInterceptor_ProcessReceivedMessageOkValsSameShardShouldWork(t *testing.T) { @@ -567,6 +763,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageOkValsSameShardShouldWork( return nil }, } + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( marshalizer, @@ -576,7 +777,10 @@ func TestTransactionInterceptor_ProcessReceivedMessageOkValsSameShardShouldWork( mock.HasherMock{}, signer, keyGen, - oneSharder) + oneSharder, + throttler, + createFreeTxFeeHandler(), + ) txNewer := &dataTransaction.Transaction{ Nonce: 1, @@ -610,6 +814,8 @@ func TestTransactionInterceptor_ProcessReceivedMessageOkValsSameShardShouldWork( case <-time.After(durTimeout): assert.Fail(t, "timeout while waiting for tx to be inserted in the pool") } + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } func TestTransactionInterceptor_ProcessReceivedMessageOkValsOtherShardsShouldWork(t *testing.T) { @@ -636,6 +842,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageOkValsOtherShardsShouldWor return nil }, } + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( marshalizer, @@ -645,7 +856,10 @@ func TestTransactionInterceptor_ProcessReceivedMessageOkValsOtherShardsShouldWor mock.HasherMock{}, signer, keyGen, - multiSharder) + multiSharder, + throttler, + createFreeTxFeeHandler(), + ) txNewer := &dataTransaction.Transaction{ Nonce: 1, @@ -678,6 +892,8 @@ func TestTransactionInterceptor_ProcessReceivedMessageOkValsOtherShardsShouldWor assert.Fail(t, "should have not add tx in pool") case <-time.After(durTimeout): } + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } func TestTransactionInterceptor_ProcessReceivedMessageTxNotValidShouldNotAdd(t *testing.T) { @@ -716,6 +932,11 @@ func TestTransactionInterceptor_ProcessReceivedMessageTxNotValidShouldNotAdd(t * return nil }, } + throttler := &mock.InterceptorThrottlerStub{ + CanProcessCalled: func() bool { + return true + }, + } txi, _ := transaction.NewTxInterceptor( marshalizer, @@ -725,7 +946,10 @@ func TestTransactionInterceptor_ProcessReceivedMessageTxNotValidShouldNotAdd(t * mock.HasherMock{}, signer, keyGen, - multiSharder) + multiSharder, + throttler, + createFreeTxFeeHandler(), + ) txNewer := &dataTransaction.Transaction{ Nonce: 1, @@ -758,4 +982,6 @@ func TestTransactionInterceptor_ProcessReceivedMessageTxNotValidShouldNotAdd(t * assert.Fail(t, "should have not add tx in pool") case <-time.After(durTimeout): } + assert.Equal(t, int32(1), throttler.EndProcessingCount()) + assert.Equal(t, int32(1), throttler.StartProcessingCount()) } diff --git a/process/transaction/process.go b/process/transaction/process.go index f7ebc49c800..020fb06e254 100644 --- a/process/transaction/process.go +++ 
b/process/transaction/process.go @@ -3,6 +3,7 @@ package transaction import ( "bytes" "math/big" + "sync" "github.com/ElrondNetwork/elrond-go/core/logger" "github.com/ElrondNetwork/elrond-go/data/state" @@ -22,7 +23,11 @@ type txProcessor struct { hasher hashing.Hasher scProcessor process.SmartContractProcessor marshalizer marshal.Marshalizer + txFeeHandler process.TransactionFeeHandler shardCoordinator sharding.Coordinator + txTypeHandler process.TxTypeHandler + economicsFee process.FeeHandler + mutTxFee sync.RWMutex } // NewTxProcessor creates a new txProcessor engine @@ -33,6 +38,9 @@ func NewTxProcessor( marshalizer marshal.Marshalizer, shardCoordinator sharding.Coordinator, scProcessor process.SmartContractProcessor, + txFeeHandler process.TransactionFeeHandler, + txTypeHandler process.TxTypeHandler, + economicsFee process.FeeHandler, ) (*txProcessor, error) { if accounts == nil || accounts.IsInterfaceNil() { @@ -53,6 +61,15 @@ func NewTxProcessor( if scProcessor == nil || scProcessor.IsInterfaceNil() { return nil, process.ErrNilSmartContractProcessor } + if txFeeHandler == nil || txFeeHandler.IsInterfaceNil() { + return nil, process.ErrNilUnsignedTxHandler + } + if txTypeHandler == nil || txTypeHandler.IsInterfaceNil() { + return nil, process.ErrNilTxTypeHandler + } + if economicsFee == nil || economicsFee.IsInterfaceNil() { + return nil, process.ErrNilEconomicsFeeHandler + } return &txProcessor{ accounts: accounts, @@ -61,12 +78,16 @@ func NewTxProcessor( marshalizer: marshalizer, shardCoordinator: shardCoordinator, scProcessor: scProcessor, + txFeeHandler: txFeeHandler, + txTypeHandler: txTypeHandler, + economicsFee: economicsFee, + mutTxFee: sync.RWMutex{}, }, nil } // ProcessTransaction modifies the account states in respect with the transaction data func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction, roundIndex uint64) error { - if tx == nil { + if tx == nil || tx.IsInterfaceNil() { return process.ErrNilTransaction } @@ -85,7 +106,7 @@ func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction, round return err } - txType, err := txProc.scProcessor.ComputeTransactionType(tx) + txType, err := txProc.txTypeHandler.ComputeTransactionType(tx) if err != nil { return err } @@ -102,6 +123,41 @@ func (txProc *txProcessor) ProcessTransaction(tx *transaction.Transaction, round return process.ErrWrongTransaction } +func (txProc *txProcessor) processTxFee(tx *transaction.Transaction, acntSnd *state.Account) (*big.Int, error) { + if acntSnd == nil { + return nil, nil + } + + cost := big.NewInt(0) + cost = cost.Mul(big.NewInt(0).SetUint64(tx.GasPrice), big.NewInt(0).SetUint64(tx.GasLimit)) + + txDataLen := int64(len(tx.Data)) + txProc.mutTxFee.RLock() + minTxFee := big.NewInt(0).SetUint64(txProc.economicsFee.MinGasLimit()) + minTxFee.Mul(minTxFee, big.NewInt(0).SetUint64(txProc.economicsFee.MinGasPrice())) + + minFee := big.NewInt(0) + minFee.Mul(big.NewInt(txDataLen), big.NewInt(0).SetUint64(txProc.economicsFee.MinGasPrice())) + minFee.Add(minFee, minTxFee) + txProc.mutTxFee.RUnlock() + + if minFee.Cmp(cost) > 0 { + return nil, process.ErrNotEnoughFeeInTransactions + } + + if acntSnd.Balance.Cmp(cost) < 0 { + return nil, process.ErrInsufficientFunds + } + + operation := big.NewInt(0) + err := acntSnd.SetBalanceWithJournal(operation.Sub(acntSnd.Balance, cost)) + if err != nil { + return nil, err + } + + return cost, nil +} + func (txProc *txProcessor) processMoveBalance( tx *transaction.Transaction, adrSrc, adrDst state.AddressContainer, @@ -114,6 +170,11 
@@ func (txProc *txProcessor) processMoveBalance( return err } + txFee, err := txProc.processTxFee(tx, acntSrc) + if err != nil { + return err + } + value := tx.Value err = txProc.moveBalances(acntSrc, acntDst, value) @@ -129,6 +190,8 @@ func (txProc *txProcessor) processMoveBalance( } } + txProc.txFeeHandler.ProcessTransactionFee(txFee) + return nil } diff --git a/process/transaction/process_test.go b/process/transaction/process_test.go index 7f745b6ec49..073e59bef35 100644 --- a/process/transaction/process_test.go +++ b/process/transaction/process_test.go @@ -7,11 +7,12 @@ import ( "math/big" "testing" + "github.com/ElrondNetwork/elrond-go/data" "github.com/ElrondNetwork/elrond-go/data/state" "github.com/ElrondNetwork/elrond-go/data/transaction" "github.com/ElrondNetwork/elrond-go/process" + "github.com/ElrondNetwork/elrond-go/process/coordinator" "github.com/ElrondNetwork/elrond-go/process/mock" - "github.com/ElrondNetwork/elrond-go/process/smartContract" txproc "github.com/ElrondNetwork/elrond-go/process/transaction" "github.com/stretchr/testify/assert" ) @@ -23,6 +24,20 @@ func generateRandomByteSlice(size int) []byte { return buff } +func FeeHandlerMock() *mock.FeeHandlerStub { + return &mock.FeeHandlerStub{ + MinGasPriceCalled: func() uint64 { + return 0 + }, + MinGasLimitCalled: func() uint64 { + return 5 + }, + MinTxFeeCalled: func() uint64 { + return 0 + }, + } +} + func createAccountStub(sndAddr, rcvAddr []byte, acntSrc, acntDst *state.Account, ) *mock.AccountsStub { @@ -51,6 +66,9 @@ func createTxProcessor() txproc.TxProcessor { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) return txProc @@ -68,6 +86,9 @@ func TestNewTxProcessor_NilAccountsShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) assert.Equal(t, process.ErrNilAccountsAdapter, err) @@ -84,6 +105,9 @@ func TestNewTxProcessor_NilHasherShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) assert.Equal(t, process.ErrNilHasher, err) @@ -100,6 +124,9 @@ func TestNewTxProcessor_NilAddressConverterMockShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) assert.Equal(t, process.ErrNilAddressConverter, err) @@ -116,6 +143,9 @@ func TestNewTxProcessor_NilMarshalizerMockShouldErr(t *testing.T) { nil, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) assert.Equal(t, process.ErrNilMarshalizer, err) @@ -132,6 +162,9 @@ func TestNewTxProcessor_NilShardCoordinatorMockShouldErr(t *testing.T) { &mock.MarshalizerMock{}, nil, &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) assert.Equal(t, process.ErrNilShardCoordinator, err) @@ -148,12 +181,34 @@ func TestNewTxProcessor_NilSCProcessorShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), nil, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) assert.Equal(t, process.ErrNilSmartContractProcessor, err) assert.Nil(t, txProc) } +func 
TestNewTxProcessor_NilTxFeeHandlerShouldErr(t *testing.T) { + t.Parallel() + + txProc, err := txproc.NewTxProcessor( + &mock.AccountsStub{}, + mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.MarshalizerMock{}, + mock.NewOneShardCoordinatorMock(), + &mock.SCProcessorMock{}, + nil, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), + ) + + assert.Equal(t, process.ErrNilUnsignedTxHandler, err) + assert.Nil(t, txProc) +} + func TestNewTxProcessor_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -164,6 +219,9 @@ func TestNewTxProcessor_OkValsShouldWork(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) assert.Nil(t, err) @@ -184,6 +242,9 @@ func TestTxProcessor_GetAddressErrAddressConvShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) addressConv.Fail = true @@ -223,6 +284,9 @@ func TestTxProcessor_GetAccountsShouldErrNilAddressContainer(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) adr1 := mock.NewAddressMock([]byte{65}) @@ -247,6 +311,9 @@ func TestTxProcessor_GetAccountsMalfunctionAccountsShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) adr1 := mock.NewAddressMock([]byte{65}) @@ -288,6 +355,9 @@ func TestTxProcessor_GetAccountsOkValsSrcShouldWork(t *testing.T) { &mock.MarshalizerMock{}, shardCoordinator, &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) shardCoordinator.ComputeIdCalled = func(container state.AddressContainer) uint32 { @@ -338,6 +408,9 @@ func TestTxProcessor_GetAccountsOkValsDsthouldWork(t *testing.T) { &mock.MarshalizerMock{}, shardCoordinator, &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) shardCoordinator.ComputeIdCalled = func(container state.AddressContainer) uint32 { @@ -373,6 +446,9 @@ func TestTxProcessor_GetAccountsOkValsShouldWork(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) a1, a2, err := execTx.GetAccounts(adr1, adr2) @@ -399,6 +475,9 @@ func TestTxProcessor_GetSameAccountShouldWork(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) a1, a2, err := execTx.GetAccounts(adr1, adr1) @@ -639,6 +718,9 @@ func TestTxProcessor_ProcessTransactionErrAddressConvShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) addressConv.Fail = true @@ -659,6 +741,9 @@ func TestTxProcessor_ProcessTransactionMalfunctionAccountsShouldErr(t *testing.T &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) tx := transaction.Transaction{} @@ -695,6 +780,9 @@ func 
TestTxProcessor_ProcessCheckNotPassShouldErr(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) err = execTx.ProcessTransaction(&tx, 4) @@ -746,6 +834,9 @@ func TestTxProcessor_ProcessCheckShouldPassWhenAdrSrcIsNotInNodeShard(t *testing &mock.MarshalizerMock{}, shardCoordinator, &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) err = execTx.ProcessTransaction(&tx, 4) @@ -789,12 +880,15 @@ func TestTxProcessor_ProcessMoveBalancesShouldWork(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) err = execTx.ProcessTransaction(&tx, 4) assert.Nil(t, err) - assert.Equal(t, 3, journalizeCalled) - assert.Equal(t, 3, saveAccountCalled) + assert.Equal(t, 4, journalizeCalled) + assert.Equal(t, 4, saveAccountCalled) } func TestTxProcessor_ProcessMoveBalancesShouldPassWhenAdrSrcIsNotInNodeShard(t *testing.T) { @@ -842,6 +936,9 @@ func TestTxProcessor_ProcessMoveBalancesShouldPassWhenAdrSrcIsNotInNodeShard(t * &mock.MarshalizerMock{}, shardCoordinator, &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) err = execTx.ProcessTransaction(&tx, 4) @@ -895,6 +992,9 @@ func TestTxProcessor_ProcessIncreaseNonceShouldPassWhenAdrSrcIsNotInNodeShard(t &mock.MarshalizerMock{}, shardCoordinator, &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) err = execTx.ProcessTransaction(&tx, 4) @@ -942,6 +1042,9 @@ func TestTxProcessor_ProcessOkValsShouldWork(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + FeeHandlerMock(), ) err = execTx.ProcessTransaction(&tx, 4) @@ -949,8 +1052,61 @@ func TestTxProcessor_ProcessOkValsShouldWork(t *testing.T) { assert.Equal(t, uint64(5), acntSrc.Nonce) assert.Equal(t, big.NewInt(29), acntSrc.Balance) assert.Equal(t, big.NewInt(71), acntDst.Balance) - assert.Equal(t, 3, journalizeCalled) - assert.Equal(t, 3, saveAccountCalled) + assert.Equal(t, 4, journalizeCalled) + assert.Equal(t, 4, saveAccountCalled) +} + +func TestTxProcessor_MoveBalanceWithFeesShouldWork(t *testing.T) { + journalizeCalled := 0 + saveAccountCalled := 0 + tracker := &mock.AccountTrackerStub{ + JournalizeCalled: func(entry state.JournalEntry) { + journalizeCalled++ + }, + SaveAccountCalled: func(accountHandler state.AccountHandler) error { + saveAccountCalled++ + return nil + }, + } + + tx := transaction.Transaction{} + tx.Nonce = 4 + tx.SndAddr = []byte("SRC") + tx.RcvAddr = []byte("DST") + tx.Value = big.NewInt(61) + tx.GasPrice = 2 + tx.GasLimit = 2 + + acntSrc, err := state.NewAccount(mock.NewAddressMock(tx.SndAddr), tracker) + assert.Nil(t, err) + acntDst, err := state.NewAccount(mock.NewAddressMock(tx.RcvAddr), tracker) + assert.Nil(t, err) + + acntSrc.Nonce = 4 + acntSrc.Balance = big.NewInt(90) + acntDst.Balance = big.NewInt(10) + + accounts := createAccountStub(tx.SndAddr, tx.RcvAddr, acntSrc, acntDst) + + execTx, _ := txproc.NewTxProcessor( + accounts, + mock.HasherMock{}, + &mock.AddressConverterMock{}, + &mock.MarshalizerMock{}, + mock.NewOneShardCoordinatorMock(), + &mock.SCProcessorMock{}, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{}, + 
FeeHandlerMock(), + ) + + err = execTx.ProcessTransaction(&tx, 4) + assert.Nil(t, err) + assert.Equal(t, uint64(5), acntSrc.Nonce) + assert.Equal(t, big.NewInt(25), acntSrc.Balance) + assert.Equal(t, big.NewInt(71), acntDst.Balance) + assert.Equal(t, 4, journalizeCalled) + assert.Equal(t, 4, saveAccountCalled) } func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { @@ -975,6 +1131,8 @@ func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { tx.SndAddr = []byte("SRC") tx.RcvAddr = generateRandomByteSlice(addrConverter.AddressLen()) tx.Value = big.NewInt(45) + tx.GasPrice = 1 + tx.GasLimit = 1 acntSrc, err := state.NewAccount(mock.NewAddressMock(tx.SndAddr), tracker) assert.Nil(t, err) @@ -982,25 +1140,12 @@ func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { acntDst, err := state.NewAccount(mock.NewAddressMock(tx.RcvAddr), tracker) assert.Nil(t, err) - acntSrc.Balance = big.NewInt(45) + acntSrc.Balance = big.NewInt(46) acntDst.SetCode([]byte{65}) accounts := createAccountStub(tx.SndAddr, tx.RcvAddr, acntSrc, acntDst) - - scProcessor, err := smartContract.NewSmartContractProcessor( - &mock.VMContainerMock{}, - &mock.ArgumentParserMock{}, - mock.HasherMock{}, - &mock.MarshalizerMock{}, - accounts, - &mock.TemporaryAccountsHandlerMock{}, - addrConverter, - mock.NewOneShardCoordinatorMock(), - &mock.IntermediateTransactionHandlerMock{}, - ) - scProcessorMock := &mock.SCProcessorMock{} - scProcessorMock.ComputeTransactionTypeCalled = scProcessor.ComputeTransactionType + wasCalled := false scProcessorMock.ExecuteSmartContractTransactionCalled = func(tx *transaction.Transaction, acntSrc, acntDst state.AccountHandler, round uint64) error { wasCalled = true @@ -1014,6 +1159,13 @@ func TestTxProcessor_ProcessTransactionScTxShouldWork(t *testing.T) { &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), scProcessorMock, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{ + ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (transactionType process.TransactionType, e error) { + return process.SCInvoking, nil + }, + }, + FeeHandlerMock(), ) err = execTx.ProcessTransaction(&tx, 4) @@ -1055,19 +1207,8 @@ func TestTxProcessor_ProcessTransactionScTxShouldReturnErrWhenExecutionFails(t * accounts := createAccountStub(tx.SndAddr, tx.RcvAddr, acntSrc, acntDst) - scProcessor, err := smartContract.NewSmartContractProcessor( - &mock.VMContainerMock{}, - &mock.ArgumentParserMock{}, - mock.HasherMock{}, - &mock.MarshalizerMock{}, - accounts, - &mock.TemporaryAccountsHandlerMock{}, - addrConverter, - mock.NewOneShardCoordinatorMock(), - &mock.IntermediateTransactionHandlerMock{}) scProcessorMock := &mock.SCProcessorMock{} - scProcessorMock.ComputeTransactionTypeCalled = scProcessor.ComputeTransactionType wasCalled := false scProcessorMock.ExecuteSmartContractTransactionCalled = func(tx *transaction.Transaction, acntSrc, acntDst state.AccountHandler, round uint64) error { wasCalled = true @@ -1081,6 +1222,11 @@ func TestTxProcessor_ProcessTransactionScTxShouldReturnErrWhenExecutionFails(t * &mock.MarshalizerMock{}, mock.NewOneShardCoordinatorMock(), scProcessorMock, + &mock.UnsignedTxHandlerMock{}, + &mock.TxTypeHandlerMock{ComputeTransactionTypeCalled: func(tx data.TransactionHandler) (transactionType process.TransactionType, e error) { + return process.SCInvoking, nil + }}, + FeeHandlerMock(), ) err = execTx.ProcessTransaction(&tx, 4) @@ -1132,24 +1278,18 @@ func TestTxProcessor_ProcessTransactionScTxShouldNotBeCalledWhenAdrDstIsNotInNod 
accounts := createAccountStub(tx.SndAddr, tx.RcvAddr, acntSrc, acntDst) - scProcessor, err := smartContract.NewSmartContractProcessor( - &mock.VMContainerMock{}, - &mock.ArgumentParserMock{}, - mock.HasherMock{}, - &mock.MarshalizerMock{}, - accounts, - &mock.TemporaryAccountsHandlerMock{}, - addrConverter, - shardCoordinator, - &mock.IntermediateTransactionHandlerMock{}) scProcessorMock := &mock.SCProcessorMock{} - scProcessorMock.ComputeTransactionTypeCalled = scProcessor.ComputeTransactionType wasCalled := false scProcessorMock.ExecuteSmartContractTransactionCalled = func(tx *transaction.Transaction, acntSrc, acntDst state.AccountHandler, round uint64) error { wasCalled = true return process.ErrNoVM } + computeType, _ := coordinator.NewTxTypeHandler( + &mock.AddressConverterMock{}, + shardCoordinator, + accounts) + execTx, _ := txproc.NewTxProcessor( accounts, mock.HasherMock{}, @@ -1157,11 +1297,14 @@ func TestTxProcessor_ProcessTransactionScTxShouldNotBeCalledWhenAdrDstIsNotInNod &mock.MarshalizerMock{}, shardCoordinator, scProcessorMock, + &mock.UnsignedTxHandlerMock{}, + computeType, + FeeHandlerMock(), ) err = execTx.ProcessTransaction(&tx, 4) assert.Nil(t, err) assert.False(t, wasCalled) - assert.Equal(t, 2, journalizeCalled) - assert.Equal(t, 2, saveAccountCalled) + assert.Equal(t, 3, journalizeCalled) + assert.Equal(t, 3, saveAccountCalled) } diff --git a/sharding/errors.go b/sharding/errors.go index 5405d196ef2..71820dfa26a 100644 --- a/sharding/errors.go +++ b/sharding/errors.go @@ -13,6 +13,9 @@ var ErrInvalidShardId = errors.New("shard id must be smaller than the total numb // ErrShardIdOutOfRange signals an error when shard id is out of range var ErrShardIdOutOfRange = errors.New("shard id out of range") +// ErrNilPubKey signals that the public key is nil +var ErrNilPubKey = errors.New("nil public key") + // ErrNoPubKeys signals an error when public keys are missing var ErrNoPubKeys = errors.New("no public keys defined") @@ -28,6 +31,9 @@ var ErrNilAddressConverter = errors.New("trying to set nil address converter") // ErrCouldNotParsePubKey signals that a given public key could not be parsed var ErrCouldNotParsePubKey = errors.New("could not parse node's public key") +// ErrCouldNotParseAddress signals that a given address could not be parsed +var ErrCouldNotParseAddress = errors.New("could not parse node's address") + // ErrNegativeOrZeroConsensusGroupSize signals that an invalid consensus group size has been provided var ErrNegativeOrZeroConsensusGroupSize = errors.New("negative or zero consensus group size") @@ -36,3 +42,42 @@ var ErrMinNodesPerShardSmallerThanConsensusSize = errors.New("minimum nodes per // ErrNodesSizeSmallerThanMinNoOfNodes signals that there are not enough nodes defined in genesis file var ErrNodesSizeSmallerThanMinNoOfNodes = errors.New("length of nodes defined is smaller than min nodes per shard required") + +// ErrNilInputNodesMap signals that a nil nodes map was provided +var ErrNilInputNodesMap = errors.New("nil input nodes map") + +// ErrSmallShardEligibleListSize signals that the eligible validators list's size is less than the consensus size +var ErrSmallShardEligibleListSize = errors.New("small shard eligible list size") + +// ErrSmallMetachainEligibleListSize signals that the eligible validators list's size is less than the consensus size +var ErrSmallMetachainEligibleListSize = errors.New("small metachain eligible list size") + +// ErrInvalidConsensusGroupSize signals that the consensus size is invalid (e.g. 
value is negative) +var ErrInvalidConsensusGroupSize = errors.New("invalid consensus group size") + +// ErrEligibleSelectionMismatch signals a mismatch between the eligible list and the group selection bitmap +var ErrEligibleSelectionMismatch = errors.New("invalid eligible validator selection") + +// ErrEligibleTooManySelections signals an invalid selection for consensus group +var ErrEligibleTooManySelections = errors.New("too many selections for consensus group") + +// ErrEligibleTooFewSelections signals an invalid selection for consensus group +var ErrEligibleTooFewSelections = errors.New("too few selections for consensus group") + +// ErrNilRandomness signals that a nil randomness source has been provided +var ErrNilRandomness = errors.New("nil randomness source") + +// ErrNilHasher signals that a nil hasher has been provided +var ErrNilHasher = errors.New("nil hasher") + +// ErrNilStake signals that a nil stake structure has been provided +var ErrNilStake = errors.New("nil stake") + +// ErrNegativeStake signals that the stake is negative +var ErrNegativeStake = errors.New("negative stake") + +// ErrNilAddress signals that the address is nil +var ErrNilAddress = errors.New("nil address") + +// ErrValidatorNotFound signals that the validator has not been found +var ErrValidatorNotFound = errors.New("validator not found") diff --git a/sharding/export_test.go b/sharding/export_test.go index 4d6cd6c7e5e..cf6427cb891 100644 --- a/sharding/export_test.go +++ b/sharding/export_test.go @@ -24,10 +24,14 @@ func (ns *NodesSetup) ProcessMetaChainAssigment() { ns.processMetaChainAssigment() } -func (ns *NodesSetup) CreateInitialNodesPubKeys() { - ns.createInitialNodesPubKeys() +func (ns *NodesSetup) CreateInitialNodesInfo() { + ns.createInitialNodesInfo() } func CommunicationIdentifierBetweenShards(shardId1 uint32, shardId2 uint32) string { return communicationIdentifierBetweenShards(shardId1, shardId2) } + +func (ihgs *indexHashedNodesCoordinator) EligibleList() []Validator { + return ihgs.nodesMap[ihgs.shardId] +} diff --git a/sharding/indexHashedNodesCoordinator.go b/sharding/indexHashedNodesCoordinator.go new file mode 100644 index 00000000000..bb724bc5fa0 --- /dev/null +++ b/sharding/indexHashedNodesCoordinator.go @@ -0,0 +1,325 @@ +package sharding + +import ( + "bytes" + "encoding/binary" + "fmt" + "math/big" + + "github.com/ElrondNetwork/elrond-go/core" + "github.com/ElrondNetwork/elrond-go/hashing" +) + +type indexHashedNodesCoordinator struct { + nbShards uint32 + shardId uint32 + hasher hashing.Hasher + nodesMap map[uint32][]Validator + shardConsensusGroupSize int + metaConsensusGroupSize int +} + +// NewIndexHashedNodesCoordinator creates a new index hashed group selector +func NewIndexHashedNodesCoordinator( + shardConsensusGroupSize int, + metaConsensusGroupSize int, + hasher hashing.Hasher, + shardId uint32, + nbShards uint32, + nodes map[uint32][]Validator, +) (*indexHashedNodesCoordinator, error) { + if shardConsensusGroupSize < 1 || metaConsensusGroupSize < 1 { + return nil, ErrInvalidConsensusGroupSize + } + + if nbShards < 1 { + return nil, ErrInvalidNumberOfShards + } + + if shardId >= nbShards && shardId != MetachainShardId { + return nil, ErrInvalidShardId + } + + if hasher == nil { + return nil, ErrNilHasher + } + + ihgs := &indexHashedNodesCoordinator{ + nbShards: nbShards, + shardId: shardId, + hasher: hasher, + nodesMap: make(map[uint32][]Validator), + shardConsensusGroupSize: shardConsensusGroupSize, + metaConsensusGroupSize: metaConsensusGroupSize, + } + + err := 
ihgs.SetNodesPerShards(nodes) + if err != nil { + return nil, err + } + + return ihgs, nil +} + +// SetNodesPerShards loads the distribution of nodes per shard into the nodes management component +func (ihgs *indexHashedNodesCoordinator) SetNodesPerShards(nodes map[uint32][]Validator) error { + if nodes == nil { + return ErrNilInputNodesMap + } + + nodesList, ok := nodes[MetachainShardId] + if ok && len(nodesList) < ihgs.metaConsensusGroupSize { + return ErrSmallMetachainEligibleListSize + } + + for shardId := uint32(0); shardId < ihgs.nbShards; shardId++ { + nbNodesShard := len(nodes[shardId]) + if nbNodesShard < ihgs.shardConsensusGroupSize { + return ErrSmallShardEligibleListSize + } + } + + ihgs.nodesMap = nodes + + return nil +} + +// ComputeValidatorsGroup will generate a list of validators based on the the eligible list, +// consensus group size and a randomness source +// Steps: +// 1. generate expanded eligible list by multiplying entries from shards' eligible list according to stake and rating -> TODO +// 2. for each value in [0, consensusGroupSize), compute proposedindex = Hash( [index as string] CONCAT randomness) % len(eligible list) +// 3. if proposed index is already in the temp validator list, then proposedIndex++ (and then % len(eligible list) as to not +// exceed the maximum index value permitted by the validator list), and then recheck against temp validator list until +// the item at the new proposed index is not found in the list. This new proposed index will be called checked index +// 4. the item at the checked index is appended in the temp validator list +func (ihgs *indexHashedNodesCoordinator) ComputeValidatorsGroup( + randomness []byte, + round uint64, + shardId uint32, +) (validatorsGroup []Validator, err error) { + if randomness == nil { + return nil, ErrNilRandomness + } + + if shardId >= ihgs.nbShards && shardId != MetachainShardId { + return nil, ErrInvalidShardId + } + + if ihgs == nil { + return nil, ErrNilRandomness + } + + tempList := make([]Validator, 0) + consensusSize := ihgs.consensusGroupSize(shardId) + randomness = []byte(fmt.Sprintf("%d-%s", round, core.ToB64(randomness))) + + // TODO: pre-compute eligible list and update only on rating change. 
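+	// Illustrative walkthrough of the selection loop below (the hash values are
+	// invented for the example): with an expanded list [v0, v1, v2] and consensusSize = 2,
+	//   index 0: Hash(bigEndian(0) + randomness) mod 3 = 1 -> v1 is appended
+	//   index 1: Hash(bigEndian(1) + randomness) mod 3 = 1 -> v1 is already selected,
+	//            so checkIndex advances to (1+1) mod 3 = 2 -> v2 is appended
+	// Only the modulo-and-advance behaviour mirrors computeListIndex and checkIndex.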
+ expandedList := ihgs.expandEligibleList(shardId) + lenExpandedList := len(expandedList) + + for startIdx := 0; startIdx < consensusSize; startIdx++ { + proposedIndex := ihgs.computeListIndex(startIdx, lenExpandedList, string(randomness)) + checkedIndex := ihgs.checkIndex(proposedIndex, expandedList, tempList) + tempList = append(tempList, expandedList[checkedIndex]) + } + + return tempList, nil +} + +// GetValidatorWithPublicKey gets the validator with the given public key +func (ihgs *indexHashedNodesCoordinator) GetValidatorWithPublicKey(publicKey []byte) (Validator, uint32, error) { + if publicKey == nil { + return nil, 0, ErrNilPubKey + } + + for shardId, shardEligible := range ihgs.nodesMap { + for i := 0; i < len(shardEligible); i++ { + if bytes.Equal(publicKey, shardEligible[i].PubKey()) { + return shardEligible[i], shardId, nil + } + } + } + + return nil, 0, ErrValidatorNotFound +} + +// GetValidatorsPublicKeys calculates the validators consensus group for a specific shard, randomness and round number, +// returning their public keys +func (ihgs *indexHashedNodesCoordinator) GetValidatorsPublicKeys( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + consensusNodes, err := ihgs.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + pubKeys := make([]string, 0) + + for _, v := range consensusNodes { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +// GetValidatorsRewardsAddresses calculates the validator consensus group for a specific shard, randomness and round +// number, returning their staking/rewards addresses +func (ihgs *indexHashedNodesCoordinator) GetValidatorsRewardsAddresses( + randomness []byte, + round uint64, + shardId uint32, +) ([]string, error) { + consensusNodes, err := ihgs.ComputeValidatorsGroup(randomness, round, shardId) + if err != nil { + return nil, err + } + + addresses := make([]string, len(consensusNodes)) + for i, v := range consensusNodes { + addresses[i] = string(v.Address()) + } + + return addresses, nil +} + +// GetSelectedPublicKeys returns the stringified public keys of the marked validators in the selection bitmap +// TODO: This function needs to be revised when the requirements are clarified +func (ihgs *indexHashedNodesCoordinator) GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) { + if shardId >= ihgs.nbShards && shardId != MetachainShardId { + return nil, ErrInvalidShardId + } + + selectionLen := uint16(len(selection) * 8) // 8 selection bits in each byte + shardEligibleLen := uint16(len(ihgs.nodesMap[shardId])) + invalidSelection := selectionLen < shardEligibleLen + + if invalidSelection { + return nil, ErrEligibleSelectionMismatch + } + + consensusSize := ihgs.consensusGroupSize(shardId) + publicKeys = make([]string, consensusSize) + cnt := 0 + + for i := uint16(0); i < shardEligibleLen; i++ { + isSelected := (selection[i/8] & (1 << (i % 8))) != 0 + + if !isSelected { + continue + } + + publicKeys[cnt] = string(ihgs.nodesMap[shardId][i].PubKey()) + cnt++ + + if cnt > consensusSize { + return nil, ErrEligibleTooManySelections + } + } + + if cnt < consensusSize { + return nil, ErrEligibleTooFewSelections + } + + return publicKeys, nil +} + +// GetAllValidatorsPublicKeys will return all validators public keys for all shards +func (ihgs *indexHashedNodesCoordinator) GetAllValidatorsPublicKeys() map[uint32][][]byte { + validatorsPubKeys := make(map[uint32][][]byte) + + for shardId, shardEligible := range 
ihgs.nodesMap { + for i := 0; i < len(shardEligible); i++ { + validatorsPubKeys[shardId] = append(validatorsPubKeys[shardId], ihgs.nodesMap[shardId][i].PubKey()) + } + } + + return validatorsPubKeys +} + +// GetValidatorsIndexes will return validators indexes for a block +func (ihgs *indexHashedNodesCoordinator) GetValidatorsIndexes(publicKeys []string) []uint64 { + validatorsPubKeys := ihgs.GetAllValidatorsPublicKeys() + signersIndexes := make([]uint64, 0) + + for _, pubKey := range publicKeys { + for index, value := range validatorsPubKeys[ihgs.shardId] { + if bytes.Equal([]byte(pubKey), value) { + signersIndexes = append(signersIndexes, uint64(index)) + } + } + } + + return signersIndexes +} + +func (ihgs *indexHashedNodesCoordinator) expandEligibleList(shardId uint32) []Validator { + //TODO implement an expand eligible list variant + return ihgs.nodesMap[shardId] +} + +// computeListIndex computes a proposed index from expanded eligible list +func (ihgs *indexHashedNodesCoordinator) computeListIndex(currentIndex int, lenList int, randomSource string) int { + buffCurrentIndex := make([]byte, 8) + binary.BigEndian.PutUint64(buffCurrentIndex, uint64(currentIndex)) + + indexHash := ihgs.hasher.Compute(string(buffCurrentIndex) + randomSource) + + computedLargeIndex := big.NewInt(0) + computedLargeIndex.SetBytes(indexHash) + lenExpandedEligibleList := big.NewInt(int64(lenList)) + + // computedListIndex = computedLargeIndex % len(expandedEligibleList) + computedListIndex := big.NewInt(0).Mod(computedLargeIndex, lenExpandedEligibleList).Int64() + + return int(computedListIndex) +} + +// checkIndex returns a checked index starting from a proposed index +func (ihgs *indexHashedNodesCoordinator) checkIndex( + proposedIndex int, + eligibleList []Validator, + selectedList []Validator, +) int { + + for { + v := eligibleList[proposedIndex] + + if ihgs.validatorIsInList(v, selectedList) { + proposedIndex++ + proposedIndex = proposedIndex % len(eligibleList) + continue + } + + return proposedIndex + } +} + +// validatorIsInList returns true if a validator has been found in provided list +func (ihgs *indexHashedNodesCoordinator) validatorIsInList(v Validator, list []Validator) bool { + for i := 0; i < len(list); i++ { + if bytes.Equal(v.PubKey(), list[i].PubKey()) { + return true + } + } + + return false +} + +func (ihgs *indexHashedNodesCoordinator) consensusGroupSize(shardId uint32) int { + if shardId == MetachainShardId { + return ihgs.metaConsensusGroupSize + } + + return ihgs.shardConsensusGroupSize +} + +// IsInterfaceNil returns true if there is no value under the interface +func (ihgs *indexHashedNodesCoordinator) IsInterfaceNil() bool { + if ihgs == nil { + return true + } + return false +} diff --git a/sharding/indexHashedNodesCoordinator_test.go b/sharding/indexHashedNodesCoordinator_test.go new file mode 100644 index 00000000000..2d676d81a08 --- /dev/null +++ b/sharding/indexHashedNodesCoordinator_test.go @@ -0,0 +1,618 @@ +package sharding_test + +import ( + "encoding/binary" + "fmt" + "github.com/ElrondNetwork/elrond-go/core" + "math/big" + "strconv" + "testing" + + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/ElrondNetwork/elrond-go/sharding/mock" + "github.com/stretchr/testify/assert" +) + +func convertBigIntToBytes(value *big.Int) []byte { + return value.Bytes() +} + +func uint64ToBytes(value uint64) []byte { + buff := make([]byte, 8) + + binary.BigEndian.PutUint64(buff, value) + return buff +} + +func createDummyNodesMap() map[uint32][]sharding.Validator { + list := 
[]sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0"), []byte("addr0")), + mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1"), []byte("addr1")), + } + + listMeta := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 1, []byte("pkMeta1"), []byte("addrMeta1")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pkMeta2"), []byte("addrMeta2")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + nodesMap[sharding.MetachainShardId] = listMeta + + return nodesMap +} + +func genRandSource(round uint64, randomness string) string { + return fmt.Sprintf("%d-%s", round, core.ToB64([]byte(randomness))) +} + +//------- NewIndexHashedNodesCoordinator + +func TestNewIndexHashedGroupSelector_NilHasherShouldErr(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + + ihgs, err := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + nil, + 0, + 1, + nodesMap, + ) + + assert.Nil(t, ihgs) + assert.Equal(t, sharding.ErrNilHasher, err) +} + +func TestNewIndexHashedGroupSelector_InvalidConsensusGroupSizeShouldErr(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + ihgs, err := sharding.NewIndexHashedNodesCoordinator( + 0, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + assert.Nil(t, ihgs) + assert.Equal(t, sharding.ErrInvalidConsensusGroupSize, err) +} + +func TestNewIndexHashedGroupSelector_OkValsShouldWork(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + ihgs, err := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + assert.NotNil(t, ihgs) + assert.Nil(t, err) +} + +//------- LoadEligibleList + +func TestIndexHashedGroupSelector_SetNilNodesMapShouldErr(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 2, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + assert.Equal(t, sharding.ErrNilInputNodesMap, ihgs.SetNodesPerShards(nil)) +} + +func TestIndexHashedGroupSelector_OkValShouldWork(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + ihgs, err := sharding.NewIndexHashedNodesCoordinator( + 2, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + assert.Nil(t, err) + assert.Equal(t, nodesMap[0], ihgs.EligibleList()) +} + +//------- ComputeValidatorsGroup + +func TestIndexHashedGroupSelector_NewCoordinatorGroup0SizeShouldErr(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + ihgs, err := sharding.NewIndexHashedNodesCoordinator( + 0, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + assert.Nil(t, ihgs) + assert.Equal(t, sharding.ErrInvalidConsensusGroupSize, err) +} + +func TestIndexHashedGroupSelector_NewCoordinatorTooFewNodesShouldErr(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + ihgs, err := sharding.NewIndexHashedNodesCoordinator( + 10, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + assert.Nil(t, ihgs) + assert.Equal(t, sharding.ErrSmallShardEligibleListSize, err) +} + +func TestIndexHashedGroupSelector_ComputeValidatorsGroupNilRandomnessShouldErr(t *testing.T) { + t.Parallel() + + nodesMap := createDummyNodesMap() + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 2, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + list2, err := ihgs.ComputeValidatorsGroup(nil, 0, 0) + + assert.Nil(t, list2) + assert.Equal(t, sharding.ErrNilRandomness, err) +} + +func TestIndexHashedGroupSelector_ComputeValidatorsGroupInvalidShardIdShouldErr(t *testing.T) { + 
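+	// shard id 5 is neither one of the configured shards (nbShards == 1) nor
+	// MetachainShardId, so ComputeValidatorsGroup is expected to return ErrInvalidShardId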
t.Parallel() + + nodesMap := createDummyNodesMap() + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 2, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + list2, err := ihgs.ComputeValidatorsGroup([]byte("radomness"), 0, 5) + + assert.Nil(t, list2) + assert.Equal(t, sharding.ErrInvalidShardId, err) +} + +//------- functionality tests + +func TestIndexHashedGroupSelector_ComputeValidatorsGroup1ValidatorShouldReturnSame(t *testing.T) { + t.Parallel() + + list := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0"), []byte("addr0")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + list2, err := ihgs.ComputeValidatorsGroup([]byte("randomness"), 0, 0) + + assert.Nil(t, err) + assert.Equal(t, list, list2) +} + +func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2Validators(t *testing.T) { + t.Parallel() + + hasher := &mock.HasherStub{} + + randomness := "randomness" + + //this will return the list in order: + //element 0 will be first element + //element 1 will be the second + hasher.ComputeCalled = func(s string) []byte { + if string(uint64ToBytes(0))+randomness == s { + return convertBigIntToBytes(big.NewInt(0)) + } + + if string(uint64ToBytes(1))+randomness == s { + return convertBigIntToBytes(big.NewInt(1)) + } + + return nil + } + + nodesMap := createDummyNodesMap() + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 2, + 1, + hasher, + 0, + 1, + nodesMap) + + list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness), 0, 0) + + assert.Nil(t, err) + assert.Equal(t, nodesMap[0], list2) +} + +func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsRevertOrder(t *testing.T) { + t.Parallel() + + hasher := &mock.HasherStub{} + + randomness := "randomness" + randSource := genRandSource(0, randomness) + + //this will return the list in reverse order: + //element 0 will be the second + //element 1 will be the first + hasher.ComputeCalled = func(s string) []byte { + if string(uint64ToBytes(0))+randSource == s { + return convertBigIntToBytes(big.NewInt(1)) + } + + if string(uint64ToBytes(1))+randSource == s { + return convertBigIntToBytes(big.NewInt(0)) + } + + return nil + } + + validator0 := mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0"), []byte("addr0")) + validator1 := mock.NewValidatorMock(big.NewInt(2), 3, []byte("pk1"), []byte("addr1")) + + list := []sharding.Validator{ + validator0, + validator1, + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + metaNode, _ := sharding.NewValidator(big.NewInt(1), 1, []byte("pubKeyMeta"), []byte("addressMeta")) + nodesMap[sharding.MetachainShardId] = []sharding.Validator{metaNode} + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 2, + 1, + hasher, + 0, + 1, + nodesMap, + ) + + list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness), 0, 0) + + assert.Nil(t, err) + assert.Equal(t, validator0, list2[1]) + assert.Equal(t, validator1, list2[0]) +} + +func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest2ValidatorsSameIndex(t *testing.T) { + t.Parallel() + + hasher := &mock.HasherStub{} + + randomness := "randomness" + + //this will return the list in order: + //element 0 will be the first + //element 1 will be the second as the same index is being returned and 0 is already in list + hasher.ComputeCalled = func(s string) []byte { + if string(uint64ToBytes(0))+randomness == s { + return 
convertBigIntToBytes(big.NewInt(0)) + } + + if string(uint64ToBytes(1))+randomness == s { + return convertBigIntToBytes(big.NewInt(0)) + } + + return nil + } + + nodesMap := createDummyNodesMap() + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 2, + 1, + hasher, + 0, + 1, + nodesMap, + ) + + list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness), 0, 0) + + assert.Nil(t, err) + assert.Equal(t, nodesMap[0], list2) +} + +func TestIndexHashedGroupSelector_ComputeValidatorsGroupTest6From10ValidatorsShouldWork(t *testing.T) { + t.Parallel() + + hasher := &mock.HasherStub{} + + randomness := "randomness" + randomnessWithRound := genRandSource(0, randomness) + + //script: + // for index 0, hasher will return 11 which will translate to 1, so 1 is the first element + // for index 1, hasher will return 1 which will translate to 1, 1 is already picked, try the next, 2 is the second element + // for index 2, hasher will return 9 which will translate to 9, 9 is the 3-rd element + // for index 3, hasher will return 9 which will translate to 9, 9 is already picked, try the next one, 0 is the 4-th element + // for index 4, hasher will return 0 which will translate to 0, 0 is already picked, 1 is already picked, 2 is already picked, + // 3 is the 4-th element + // for index 5, hasher will return 9 which will translate to 9, so 9, 0, 1, 2, 3 are already picked, 4 is the 5-th element + script := make(map[string]*big.Int) + + script[string(uint64ToBytes(0))+randomnessWithRound] = big.NewInt(11) //will translate to 1, add 1 + script[string(uint64ToBytes(1))+randomnessWithRound] = big.NewInt(1) //will translate to 1, add 2 + script[string(uint64ToBytes(2))+randomnessWithRound] = big.NewInt(9) //will translate to 9, add 9 + script[string(uint64ToBytes(3))+randomnessWithRound] = big.NewInt(9) //will translate to 9, add 0 + script[string(uint64ToBytes(4))+randomnessWithRound] = big.NewInt(0) //will translate to 0, add 3 + script[string(uint64ToBytes(5))+randomnessWithRound] = big.NewInt(9) //will translate to 9, add 4 + + hasher.ComputeCalled = func(s string) []byte { + val, ok := script[s] + + if !ok { + assert.Fail(t, "should have not got here") + } + + return convertBigIntToBytes(val) + } + + validator0 := mock.NewValidatorMock(big.NewInt(1), 1, []byte("pk0"), []byte("addr0")) + validator1 := mock.NewValidatorMock(big.NewInt(2), 2, []byte("pk1"), []byte("addr1")) + validator2 := mock.NewValidatorMock(big.NewInt(3), 3, []byte("pk2"), []byte("addr2")) + validator3 := mock.NewValidatorMock(big.NewInt(4), 4, []byte("pk3"), []byte("addr3")) + validator4 := mock.NewValidatorMock(big.NewInt(5), 5, []byte("pk4"), []byte("addr4")) + validator5 := mock.NewValidatorMock(big.NewInt(6), 6, []byte("pk5"), []byte("addr5")) + validator6 := mock.NewValidatorMock(big.NewInt(7), 7, []byte("pk6"), []byte("addr6")) + validator7 := mock.NewValidatorMock(big.NewInt(8), 8, []byte("pk7"), []byte("addr7")) + validator8 := mock.NewValidatorMock(big.NewInt(9), 9, []byte("pk8"), []byte("addr8")) + validator9 := mock.NewValidatorMock(big.NewInt(10), 10, []byte("pk9"), []byte("addr9")) + + list := []sharding.Validator{ + validator0, + validator1, + validator2, + validator3, + validator4, + validator5, + validator6, + validator7, + validator8, + validator9, + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + validatorMeta, _ := sharding.NewValidator(big.NewInt(1), 1, []byte("pubKeyMeta"), []byte("addressMeta")) + nodesMap[sharding.MetachainShardId] = []sharding.Validator{validatorMeta} + ihgs, _ := 
sharding.NewIndexHashedNodesCoordinator( + 6, + 1, + hasher, + 0, + 1, + nodesMap, + ) + + list2, err := ihgs.ComputeValidatorsGroup([]byte(randomness), 0, 0) + + assert.Nil(t, err) + assert.Equal(t, 6, len(list2)) + //check order as described in script + assert.Equal(t, validator1, list2[0]) + assert.Equal(t, validator2, list2[1]) + assert.Equal(t, validator9, list2[2]) + assert.Equal(t, validator0, list2[3]) + assert.Equal(t, validator3, list2[4]) + assert.Equal(t, validator4, list2[5]) +} + +func BenchmarkIndexHashedGroupSelector_ComputeValidatorsGroup21of400(b *testing.B) { + consensusGroupSize := 21 + list := make([]sharding.Validator, 0) + + //generate 400 validators + for i := 0; i < 400; i++ { + list = append(list, mock.NewValidatorMock(big.NewInt(0), 0, []byte("pk"+strconv.Itoa(i)), []byte("addr"+strconv.Itoa(i)))) + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + consensusGroupSize, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + randomness := strconv.Itoa(i) + list2, _ := ihgs.ComputeValidatorsGroup([]byte(randomness), 0, 0) + + assert.Equal(b, consensusGroupSize, len(list2)) + } +} + +func TestIndexHashedGroupSelector_GetValidatorWithPublicKeyShouldReturnErrNilPubKey(t *testing.T) { + t.Parallel() + + list := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0"), []byte("addr0")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + _, _, err := ihgs.GetValidatorWithPublicKey(nil) + + assert.Equal(t, sharding.ErrNilPubKey, err) +} + +func TestIndexHashedGroupSelector_GetValidatorWithPublicKeyShouldReturnErrValidatorNotFound(t *testing.T) { + t.Parallel() + + list := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0"), []byte("addr0")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[0] = list + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + &mock.HasherMock{}, + 0, + 1, + nodesMap, + ) + + _, _, err := ihgs.GetValidatorWithPublicKey([]byte("pk1")) + + assert.Equal(t, sharding.ErrValidatorNotFound, err) +} + +func TestIndexHashedGroupSelector_GetValidatorWithPublicKeyShouldWork(t *testing.T) { + t.Parallel() + + list_meta := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0_meta"), []byte("addr0_meta")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk1_meta"), []byte("addr1_meta")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk2_meta"), []byte("addr2_meta")), + } + list_shard0 := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0_shard0"), []byte("addr0_shard0")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk1_shard0"), []byte("addr1_shard0")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk2_shard0"), []byte("addr2_shard0")), + } + list_shard1 := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk0_shard1"), []byte("addr0_shard1")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk1_shard1"), []byte("addr1_shard1")), + mock.NewValidatorMock(big.NewInt(1), 2, []byte("pk2_shard1"), []byte("addr2_shard1")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[sharding.MetachainShardId] = list_meta + nodesMap[0] = list_shard0 + nodesMap[1] = list_shard1 + + ihgs, _ := 
sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + &mock.HasherMock{}, + 0, + 2, + nodesMap, + ) + + validator, shardId, err := ihgs.GetValidatorWithPublicKey([]byte("pk0_meta")) + assert.Nil(t, err) + assert.Equal(t, sharding.MetachainShardId, shardId) + assert.Equal(t, []byte("addr0_meta"), validator.Address()) + + validator, shardId, err = ihgs.GetValidatorWithPublicKey([]byte("pk1_shard0")) + assert.Nil(t, err) + assert.Equal(t, uint32(0), shardId) + assert.Equal(t, []byte("addr1_shard0"), validator.Address()) + + validator, shardId, err = ihgs.GetValidatorWithPublicKey([]byte("pk2_shard1")) + assert.Nil(t, err) + assert.Equal(t, uint32(1), shardId) + assert.Equal(t, []byte("addr2_shard1"), validator.Address()) +} + +func TestIndexHashedGroupSelector_GetAllValidatorsPublicKeys(t *testing.T) { + t.Parallel() + + shardZeroId := uint32(0) + shardOneId := uint32(1) + expectedValidatorsPubKeys := map[uint32][][]byte{ + shardZeroId: {[]byte("pk0_shard0"), []byte("pk1_shard0"), []byte("pk2_shard0")}, + shardOneId: {[]byte("pk0_shard1"), []byte("pk1_shard1"), []byte("pk2_shard1")}, + sharding.MetachainShardId: {[]byte("pk0_meta"), []byte("pk1_meta"), []byte("pk2_meta")}, + } + + listMeta := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[sharding.MetachainShardId][0], []byte("addr0_meta")), + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[sharding.MetachainShardId][1], []byte("addr1_meta")), + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[sharding.MetachainShardId][2], []byte("addr2_meta")), + } + listShard0 := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[shardZeroId][0], []byte("addr0_shard0")), + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[shardZeroId][1], []byte("addr1_shard0")), + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[shardZeroId][2], []byte("addr2_shard0")), + } + listShard1 := []sharding.Validator{ + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[shardOneId][0], []byte("addr0_shard1")), + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[shardOneId][1], []byte("addr1_shard1")), + mock.NewValidatorMock(big.NewInt(1), 2, expectedValidatorsPubKeys[shardOneId][2], []byte("addr2_shard1")), + } + + nodesMap := make(map[uint32][]sharding.Validator) + nodesMap[sharding.MetachainShardId] = listMeta + nodesMap[shardZeroId] = listShard0 + nodesMap[shardOneId] = listShard1 + + ihgs, _ := sharding.NewIndexHashedNodesCoordinator( + 1, + 1, + &mock.HasherMock{}, + shardZeroId, + 2, + nodesMap, + ) + + allValidatorsPublicKeys := ihgs.GetAllValidatorsPublicKeys() + assert.Equal(t, expectedValidatorsPubKeys, allValidatorsPublicKeys) +} diff --git a/sharding/interface.go b/sharding/interface.go new file mode 100644 index 00000000000..b567d2490bb --- /dev/null +++ b/sharding/interface.go @@ -0,0 +1,47 @@ +package sharding + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/data/state" +) + +// MetachainShardId will be used to identify a shard ID as metachain +const MetachainShardId = uint32(0xFFFFFFFF) + +// Coordinator defines what a shard state coordinator should hold +type Coordinator interface { + NumberOfShards() uint32 + ComputeId(address state.AddressContainer) uint32 + SelfId() uint32 + SameShard(firstAddress, secondAddress state.AddressContainer) bool + CommunicationIdentifier(destShardID uint32) string + IsInterfaceNil() bool +} + +// Validator defines a node that can be 
allocated to a shard for participation in a consensus group as validator +// or block proposer +type Validator interface { + Stake() *big.Int + Rating() int32 + PubKey() []byte + Address() []byte +} + +// NodesCoordinator defines the behaviour of a struct able to do validator group selection +type NodesCoordinator interface { + PublicKeysSelector + SetNodesPerShards(nodes map[uint32][]Validator) error + ComputeValidatorsGroup(randomness []byte, round uint64, shardId uint32) (validatorsGroup []Validator, err error) + GetValidatorWithPublicKey(publicKey []byte) (validator Validator, shardId uint32, err error) + IsInterfaceNil() bool +} + +// PublicKeysSelector allows retrieval of eligible validators public keys +type PublicKeysSelector interface { + GetValidatorsIndexes(publicKeys []string) []uint64 + GetAllValidatorsPublicKeys() map[uint32][][]byte + GetSelectedPublicKeys(selection []byte, shardId uint32) (publicKeys []string, err error) + GetValidatorsPublicKeys(randomness []byte, round uint64, shardId uint32) ([]string, error) + GetValidatorsRewardsAddresses(randomness []byte, round uint64, shardId uint32) ([]string, error) +} diff --git a/sharding/mock/hasherMock.go b/sharding/mock/hasherMock.go new file mode 100644 index 00000000000..0218936b5c0 --- /dev/null +++ b/sharding/mock/hasherMock.go @@ -0,0 +1,37 @@ +package mock + +import "crypto/sha256" + +var sha256EmptyHash []byte + +// HasherMock that will be used for testing +type HasherMock struct { +} + +// Compute will output the SHA's equivalent of the input string +func (sha *HasherMock) Compute(s string) []byte { + h := sha256.New() + h.Write([]byte(s)) + return h.Sum(nil) +} + +// EmptyHash will return the equivalent of empty string SHA's +func (sha *HasherMock) EmptyHash() []byte { + if len(sha256EmptyHash) == 0 { + sha256EmptyHash = sha.Compute("") + } + return sha256EmptyHash +} + +// Size return the required size in bytes +func (sha *HasherMock) Size() int { + return sha256.Size +} + +// IsInterfaceNil returns true if there is no value under the interface +func (sha *HasherMock) IsInterfaceNil() bool { + if sha == nil { + return true + } + return false +} diff --git a/sharding/mock/hasherStub.go b/sharding/mock/hasherStub.go new file mode 100644 index 00000000000..216fc9d9909 --- /dev/null +++ b/sharding/mock/hasherStub.go @@ -0,0 +1,30 @@ +package mock + +type HasherStub struct { + ComputeCalled func(s string) []byte + EmptyHashCalled func() []byte + SizeCalled func() int +} + +// Compute will output the SHA's equivalent of the input string +func (hs *HasherStub) Compute(s string) []byte { + return hs.ComputeCalled(s) +} + +// EmptyHash will return the equivalent of empty string SHA's +func (hs *HasherStub) EmptyHash() []byte { + return hs.EmptyHashCalled() +} + +// Size returns the required size in bytes +func (hs *HasherStub) Size() int { + return hs.SizeCalled() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (hs *HasherStub) IsInterfaceNil() bool { + if hs == nil { + return true + } + return false +} diff --git a/sharding/mock/invalidNodesSetupMock.json b/sharding/mock/invalidNodesSetupMock.json index 1da83d4d669..67458949a71 100644 --- a/sharding/mock/invalidNodesSetupMock.json +++ b/sharding/mock/invalidNodesSetupMock.json @@ -8,10 +8,12 @@ "metaChainMinNodes" : 1, "initialNodes": [ { - "pubkey": 
"41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081" + "pubkey": "41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081", + "address": "9e95a4e46da335a96845b4316251fc1bb197e1b8136d96ecc62bf6604eca9e49" }, { - "pubkey": "52f3bf5c01771f601ec2137e267319ab6716ef6ff5dfddaea48b42d955f631167f2ce19296a202bb8fd174f4e94f8c85f619df85a7f9f8de0f3768e5e6d8c48187b767deccf9829be246aa331aa86d182eb8fa28ea8a3e45d357ed1647a9be020a5569d686253a6f89e9123c7f21f302e82f67d3e3cd69cf267b9910a663ef32" + "pubkey": "52f3bf5c01771f601ec2137e267319ab6716ef6ff5dfddaea48b42d955f631167f2ce19296a202bb8fd174f4e94f8c85f619df85a7f9f8de0f3768e5e6d8c48187b767deccf9829be246aa331aa86d182eb8fa28ea8a3e45d357ed1647a9be020a5569d686253a6f89e9123c7f21f302e82f67d3e3cd69cf267b9910a663ef32", + "address": "7a330039e77ca06bc127319fd707cc4911a80db489a39fcfb746283a05f61836" } ] } diff --git a/sharding/mock/nodesCoordinatorMock.go b/sharding/mock/nodesCoordinatorMock.go new file mode 100644 index 00000000000..9ad5515d724 --- /dev/null +++ b/sharding/mock/nodesCoordinatorMock.go @@ -0,0 +1,71 @@ +package mock + +import ( + "math/big" + + "github.com/ElrondNetwork/elrond-go/sharding" +) + +type NodesCoordinatorMock struct { + ComputeValidatorsGroupCalled func([]byte) ([]sharding.Validator, error) + GetValidatorsPublicKeysCalled func(randomness []byte) ([]string, error) +} + +func (ncm NodesCoordinatorMock) ComputeValidatorsGroup(randomness []byte) (validatorsGroup []sharding.Validator, err error) { + if ncm.ComputeValidatorsGroupCalled != nil { + return ncm.ComputeValidatorsGroupCalled(randomness) + } + + list := []sharding.Validator{ + NewValidatorMock(big.NewInt(0), 0, []byte("A"), []byte("AA")), + NewValidatorMock(big.NewInt(0), 0, []byte("B"), []byte("BB")), + NewValidatorMock(big.NewInt(0), 0, []byte("C"), []byte("CC")), + NewValidatorMock(big.NewInt(0), 0, []byte("D"), []byte("DD")), + NewValidatorMock(big.NewInt(0), 0, []byte("E"), []byte("EE")), + NewValidatorMock(big.NewInt(0), 0, []byte("F"), []byte("FF")), + NewValidatorMock(big.NewInt(0), 0, []byte("G"), []byte("GG")), + NewValidatorMock(big.NewInt(0), 0, []byte("H"), []byte("HH")), + NewValidatorMock(big.NewInt(0), 0, []byte("I"), []byte("II")), + } + + return list, nil +} + +func (ncm NodesCoordinatorMock) GetValidatorsPublicKeys(randomness []byte) ([]string, error) { + if ncm.GetValidatorsPublicKeysCalled != nil { + return ncm.GetValidatorsPublicKeysCalled(randomness) + } + + validators, err := ncm.ComputeValidatorsGroup(randomness) + if err != nil { + return nil, err + } + + pubKeys := make([]string, 0) + + for _, v := range validators { + pubKeys = append(pubKeys, string(v.PubKey())) + } + + return pubKeys, nil +} + +func (ncm NodesCoordinatorMock) ConsensusGroupSize() int { + panic("implement me") +} + +func (ncm NodesCoordinatorMock) SetNodesPerShards(map[uint32][]sharding.Validator) error { + return nil +} + +func (ncm NodesCoordinatorMock) SetConsensusGroupSize(int) error { + panic("implement me") +} + +func (ncm NodesCoordinatorMock) GetSelectedPublicKeys(selection []byte) (publicKeys []string, err error) { + panic("implement me") +} + +func (ncm NodesCoordinatorMock) 
GetValidatorWithPublicKey(publicKey []byte) (sharding.Validator, uint32, error) { + panic("implement me") +} diff --git a/sharding/mock/nodesSetupMock.json b/sharding/mock/nodesSetupMock.json index 78110c8a6b6..17cf384c5b4 100644 --- a/sharding/mock/nodesSetupMock.json +++ b/sharding/mock/nodesSetupMock.json @@ -8,19 +8,24 @@ "metaChainMinNodes" : 1, "initialNodes": [ { - "pubkey": "41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081" + "pubkey": "41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081", + "address": "9e95a4e46da335a96845b4316251fc1bb197e1b8136d96ecc62bf6604eca9e49" }, { - "pubkey": "52f3bf5c01771f601ec2137e267319ab6716ef6ff5dfddaea48b42d955f631167f2ce19296a202bb8fd174f4e94f8c85f619df85a7f9f8de0f3768e5e6d8c48187b767deccf9829be246aa331aa86d182eb8fa28ea8a3e45d357ed1647a9be020a5569d686253a6f89e9123c7f21f302e82f67d3e3cd69cf267b9910a663ef32" + "pubkey": "52f3bf5c01771f601ec2137e267319ab6716ef6ff5dfddaea48b42d955f631167f2ce19296a202bb8fd174f4e94f8c85f619df85a7f9f8de0f3768e5e6d8c48187b767deccf9829be246aa331aa86d182eb8fa28ea8a3e45d357ed1647a9be020a5569d686253a6f89e9123c7f21f302e82f67d3e3cd69cf267b9910a663ef32", + "address": "7a330039e77ca06bc127319fd707cc4911a80db489a39fcfb746283a05f61836" }, { - "pubkey": "5e91c426c5c8f5f805f86de1e0653e2ec33853772e583b88e9f0f201089d03d8570759c3c3ab610ce573493c33ba0adf954c8939dba5d5ef7f2be4e87145d8153fc5b4fb91cecb8d9b1f62e080743fbf69c8c3096bf07980bb82cb450ba9b902673373d5b671ea73620cc5bc4d36f7a0f5ca3684d4c8aa5c1b425ab2a8673140" + "pubkey": "5e91c426c5c8f5f805f86de1e0653e2ec33853772e583b88e9f0f201089d03d8570759c3c3ab610ce573493c33ba0adf954c8939dba5d5ef7f2be4e87145d8153fc5b4fb91cecb8d9b1f62e080743fbf69c8c3096bf07980bb82cb450ba9b902673373d5b671ea73620cc5bc4d36f7a0f5ca3684d4c8aa5c1b425ab2a8673140", + "address": "131e2e717f2d33bdf7850c12b03dfe41ea8a5e76fdd6d4f23aebe558603e746f" }, { - "pubkey": "73972bf46dca59fba211c58f11b530f8e9d6392c499655ce760abc6458fd9c6b54b9676ee4b95aa32f6c254c9aad2f63a6195cd65d837a4320d7b8e915ba3a7123c8f4983b201035573c0752bb54e9021eb383b40d302447b62ea7a3790c89c47f5ab81d183f414e87611a31ff635ad22e969495356d5bc44eec7917aaad4c5e" + "pubkey": "73972bf46dca59fba211c58f11b530f8e9d6392c499655ce760abc6458fd9c6b54b9676ee4b95aa32f6c254c9aad2f63a6195cd65d837a4320d7b8e915ba3a7123c8f4983b201035573c0752bb54e9021eb383b40d302447b62ea7a3790c89c47f5ab81d183f414e87611a31ff635ad22e969495356d5bc44eec7917aaad4c5e", + "address": "4c9e66b605882c1099088f26659692f084e41dc0dedfaedf6a6409af21c02aac" }, { - "pubkey": "7391ccce066ab5674304b10220643bc64829afa626a165f1e7a6618e260fa68f8e79018ac5964f7a1b8dd419645049042e34ebe7f2772def71e6176ce9daf50a57c17ee2a7445b908fe47e8f978380fcc2654a19925bf73db2402b09dde515148081f8ca7c331fbedec689de1b7bfce6bf106e4433557c29752c12d0a009f47a" + "pubkey": "7391ccce066ab5674304b10220643bc64829afa626a165f1e7a6618e260fa68f8e79018ac5964f7a1b8dd419645049042e34ebe7f2772def71e6176ce9daf50a57c17ee2a7445b908fe47e8f978380fcc2654a19925bf73db2402b09dde515148081f8ca7c331fbedec689de1b7bfce6bf106e4433557c29752c12d0a009f47a", + "address": "90a66900634b206d20627fbaec432ebfbabeaf30b9e338af63191435e2e37022" } ] } diff --git 
a/sharding/mock/validatorMock.go b/sharding/mock/validatorMock.go new file mode 100644 index 00000000000..e4f9bf01af8 --- /dev/null +++ b/sharding/mock/validatorMock.go @@ -0,0 +1,32 @@ +package mock + +import ( + "math/big" +) + +type ValidatorMock struct { + stake *big.Int + rating int32 + pubKey []byte + address []byte +} + +func NewValidatorMock(stake *big.Int, rating int32, pubKey []byte, address []byte) *ValidatorMock { + return &ValidatorMock{stake: stake, rating: rating, pubKey: pubKey, address: address} +} + +func (vm *ValidatorMock) Stake() *big.Int { + return vm.stake +} + +func (vm *ValidatorMock) Rating() int32 { + return vm.rating +} + +func (vm *ValidatorMock) PubKey() []byte { + return vm.pubKey +} + +func (vm *ValidatorMock) Address() []byte { + return vm.address +} diff --git a/sharding/nodesSetup.go b/sharding/nodesSetup.go index 6d9a1fee4cb..535148e4562 100644 --- a/sharding/nodesSetup.go +++ b/sharding/nodesSetup.go @@ -9,9 +9,31 @@ import ( // InitialNode holds data from json type InitialNode struct { - PubKey string `json:"pubkey"` + PubKey string `json:"pubkey"` + Address string `json:"address"` + NodeInfo +} + +// NodeInfo holds node info +type NodeInfo struct { assignedShard uint32 pubKey []byte + address []byte +} + +// AssignedShard gets the node assigned shard +func (ni *NodeInfo) AssignedShard() uint32 { + return ni.assignedShard +} + +// Address gets the node address +func (ni *NodeInfo) Address() []byte { + return ni.address +} + +// PubKey gets the node public key +func (ni *NodeInfo) PubKey() []byte { + return ni.pubKey } // NodesSetup hold data for decoded data from json file @@ -29,7 +51,7 @@ type NodesSetup struct { nrOfShards uint32 nrOfNodes uint32 nrOfMetaChainNodes uint32 - allNodesPubKeys map[uint32][]string + allNodesInfo map[uint32][]*NodeInfo } // NewNodesSetup creates a new decoded nodes structure from json config file @@ -52,7 +74,7 @@ func NewNodesSetup(nodesFilePath string, numOfNodes uint64) (*NodesSetup, error) nodes.processMetaChainAssigment() nodes.processShardAssignment() - nodes.createInitialNodesPubKeys() + nodes.createInitialNodesInfo() return nodes, nil } @@ -64,6 +86,7 @@ func (ns *NodesSetup) processConfig() error { ns.nrOfMetaChainNodes = 0 for i := 0; i < len(ns.InitialNodes); i++ { ns.InitialNodes[i].pubKey, err = hex.DecodeString(ns.InitialNodes[i].PubKey) + ns.InitialNodes[i].address, err = hex.DecodeString(ns.InitialNodes[i].Address) // decoder treats empty string as correct, it is not allowed to have empty string as public key if ns.InitialNodes[i].PubKey == "" || err != nil { @@ -71,6 +94,12 @@ func (ns *NodesSetup) processConfig() error { return ErrCouldNotParsePubKey } + // decoder treats empty string as correct, it is not allowed to have empty string as address + if ns.InitialNodes[i].Address == "" || err != nil { + ns.InitialNodes[i].address = nil + return ErrCouldNotParseAddress + } + ns.nrOfNodes++ } @@ -133,32 +162,66 @@ func (ns *NodesSetup) processShardAssignment() { } } -func (ns *NodesSetup) createInitialNodesPubKeys() { +func (ns *NodesSetup) createInitialNodesInfo() { nrOfShardAndMeta := ns.nrOfShards + 1 - ns.allNodesPubKeys = make(map[uint32][]string, nrOfShardAndMeta) + ns.allNodesInfo = make(map[uint32][]*NodeInfo, nrOfShardAndMeta) for _, in := range ns.InitialNodes { - if in.pubKey != nil { - ns.allNodesPubKeys[in.assignedShard] = append(ns.allNodesPubKeys[in.assignedShard], string(in.pubKey)) + if in.pubKey != nil && in.address != nil { + ns.allNodesInfo[in.assignedShard] = 
append(ns.allNodesInfo[in.assignedShard], + &NodeInfo{in.assignedShard, in.pubKey, in.address}) } } } -// InitialNodesPubKeys - gets initial public keys +// InitialNodesPubKeys - gets initial nodes public keys func (ns *NodesSetup) InitialNodesPubKeys() map[uint32][]string { - return ns.allNodesPubKeys + allNodesPubKeys := make(map[uint32][]string, 0) + for shardId, nodesInfo := range ns.allNodesInfo { + pubKeys := make([]string, len(nodesInfo)) + for i := 0; i < len(nodesInfo); i++ { + pubKeys[i] = string(nodesInfo[i].pubKey) + } + + allNodesPubKeys[shardId] = pubKeys + } + + return allNodesPubKeys +} + +// InitialNodesInfo - gets initial nodes info +func (ns *NodesSetup) InitialNodesInfo() map[uint32][]*NodeInfo { + return ns.allNodesInfo } -// InitialNodesPubKeysForShard - gets initial public keys +// InitialNodesPubKeysForShard - gets initial nodes public keys for shard func (ns *NodesSetup) InitialNodesPubKeysForShard(shardId uint32) ([]string, error) { - if ns.allNodesPubKeys[shardId] == nil { + if ns.allNodesInfo[shardId] == nil { + return nil, ErrShardIdOutOfRange + } + if len(ns.allNodesInfo[shardId]) == 0 { + return nil, ErrNoPubKeys + } + + nodesInfo := ns.allNodesInfo[shardId] + pubKeys := make([]string, len(nodesInfo)) + for i := 0; i < len(nodesInfo); i++ { + pubKeys[i] = string(nodesInfo[i].pubKey) + } + + return pubKeys, nil +} + +// InitialNodesInfoForShard - gets initial nodes info for shard +func (ns *NodesSetup) InitialNodesInfoForShard(shardId uint32) ([]*NodeInfo, error) { + if ns.allNodesInfo[shardId] == nil { return nil, ErrShardIdOutOfRange } - if len(ns.allNodesPubKeys[shardId]) == 0 { + if len(ns.allNodesInfo[shardId]) == 0 { return nil, ErrNoPubKeys } - return ns.allNodesPubKeys[shardId], nil + return ns.allNodesInfo[shardId], nil } // NumberOfShards returns the calculated number of shards diff --git a/sharding/nodesSetup_test.go b/sharding/nodesSetup_test.go index 3f0c072940f..9ee30a85038 100644 --- a/sharding/nodesSetup_test.go +++ b/sharding/nodesSetup_test.go @@ -8,18 +8,40 @@ import ( "github.com/stretchr/testify/assert" ) +var ( + PubKeys = []string{ + "41378f754e2c7b2745208c3ed21b151d297acdc84c3aca00b9e292cf28ec2d444771070157ea7760ed83c26f4fed387d0077e00b563a95825dac2cbc349fc0025ccf774e37b0a98ad9724d30e90f8c29b4091ccb738ed9ffc0573df776ee9ea30b3c038b55e532760ea4a8f152f2a52848020e5cee1cc537f2c2323399723081", + "52f3bf5c01771f601ec2137e267319ab6716ef6ff5dfddaea48b42d955f631167f2ce19296a202bb8fd174f4e94f8c85f619df85a7f9f8de0f3768e5e6d8c48187b767deccf9829be246aa331aa86d182eb8fa28ea8a3e45d357ed1647a9be020a5569d686253a6f89e9123c7f21f302e82f67d3e3cd69cf267b9910a663ef32", + "5e91c426c5c8f5f805f86de1e0653e2ec33853772e583b88e9f0f201089d03d8570759c3c3ab610ce573493c33ba0adf954c8939dba5d5ef7f2be4e87145d8153fc5b4fb91cecb8d9b1f62e080743fbf69c8c3096bf07980bb82cb450ba9b902673373d5b671ea73620cc5bc4d36f7a0f5ca3684d4c8aa5c1b425ab2a8673140", + "73972bf46dca59fba211c58f11b530f8e9d6392c499655ce760abc6458fd9c6b54b9676ee4b95aa32f6c254c9aad2f63a6195cd65d837a4320d7b8e915ba3a7123c8f4983b201035573c0752bb54e9021eb383b40d302447b62ea7a3790c89c47f5ab81d183f414e87611a31ff635ad22e969495356d5bc44eec7917aaad4c5e", + "7391ccce066ab5674304b10220643bc64829afa626a165f1e7a6618e260fa68f8e79018ac5964f7a1b8dd419645049042e34ebe7f2772def71e6176ce9daf50a57c17ee2a7445b908fe47e8f978380fcc2654a19925bf73db2402b09dde515148081f8ca7c331fbedec689de1b7bfce6bf106e4433557c29752c12d0a009f47a", + 
"24dea9b5c79174c558c38316b2df25b956c53f0d0128b7427d219834867cc1b0868b7faff0205fe23e5ffdf276acfad6423890c782c7be7b98a31d501e4276a015a54d9849109322130fc9a9cb61d183318d50fcde44fabcbf600051c7cb950304b05e82f90f2ac4647016f39439608cd64ccc82fe6e996289bb2150e4e3ab08", + } + + Address = []string{ + "9e95a4e46da335a96845b4316251fc1bb197e1b8136d96ecc62bf6604eca9e49", + "7a330039e77ca06bc127319fd707cc4911a80db489a39fcfb746283a05f61836", + "131e2e717f2d33bdf7850c12b03dfe41ea8a5e76fdd6d4f23aebe558603e746f", + "4c9e66b605882c1099088f26659692f084e41dc0dedfaedf6a6409af21c02aac", + "90a66900634b206d20627fbaec432ebfbabeaf30b9e338af63191435e2e37022", + "63f702e061385324a25dc4f1bcfc7e4f4692bcd80de71bd4dd7d6e2f67f92481", + } +) + func createNodesSetupOneShardOneNodeWithOneMeta() *sharding.NodesSetup { + noOfInitialNodes := 2 ns := &sharding.NodesSetup{} ns.ConsensusGroupSize = 1 ns.MinNodesPerShard = 1 ns.MetaChainConsensusGroupSize = 1 ns.MetaChainMinNodes = 1 - ns.InitialNodes = make([]*sharding.InitialNode, 2) + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" + ns.InitialNodes[0].PubKey = PubKeys[0] + ns.InitialNodes[0].Address = Address[0] ns.InitialNodes[1] = &sharding.InitialNode{} - ns.InitialNodes[1].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" - + ns.InitialNodes[1].PubKey = PubKeys[1] + ns.InitialNodes[1].Address = Address[1] err := ns.ProcessConfig() if err != nil { return nil @@ -27,32 +49,24 @@ func createNodesSetupOneShardOneNodeWithOneMeta() *sharding.NodesSetup { ns.ProcessMetaChainAssigment() ns.ProcessShardAssignment() - ns.CreateInitialNodesPubKeys() + ns.CreateInitialNodesInfo() return ns } func createNodesSetupTwoShardTwoNodesWithOneMeta() *sharding.NodesSetup { + noOfInitialNodes := 6 ns := &sharding.NodesSetup{} ns.ConsensusGroupSize = 1 ns.MinNodesPerShard = 2 ns.MetaChainConsensusGroupSize = 1 - ns.MetaChainMinNodes = 1 - ns.InitialNodes = make([]*sharding.InitialNode, 5) - ns.InitialNodes[0] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417", - } - ns.InitialNodes[1] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419", - } - ns.InitialNodes[2] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418", - } - ns.InitialNodes[3] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417", - } - ns.InitialNodes[4] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7416", + ns.MetaChainMinNodes = 2 + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) + + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] } err := ns.ProcessConfig() @@ -62,35 +76,24 @@ func createNodesSetupTwoShardTwoNodesWithOneMeta() *sharding.NodesSetup { ns.ProcessMetaChainAssigment() ns.ProcessShardAssignment() - ns.CreateInitialNodesPubKeys() + ns.CreateInitialNodesInfo() return ns } func createNodesSetupTwoShard5NodesWithMeta() *sharding.NodesSetup { + noOfInitialNodes := 5 ns := &sharding.NodesSetup{} ns.ConsensusGroupSize = 1 ns.MinNodesPerShard = 2 - ns.MetaChainMinNodes = 1 ns.MetaChainConsensusGroupSize = 1 - ns.InitialNodes = make([]*sharding.InitialNode, 6) - 
ns.InitialNodes[0] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7410", - } - ns.InitialNodes[1] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419", - } - ns.InitialNodes[2] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418", - } - ns.InitialNodes[3] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417", - } - ns.InitialNodes[4] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7416", - } - ns.InitialNodes[5] = &sharding.InitialNode{ - PubKey: "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7411", + ns.MetaChainMinNodes = 1 + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) + + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] } err := ns.ProcessConfig() @@ -100,31 +103,25 @@ func createNodesSetupTwoShard5NodesWithMeta() *sharding.NodesSetup { ns.ProcessMetaChainAssigment() ns.ProcessShardAssignment() - ns.CreateInitialNodesPubKeys() + ns.CreateInitialNodesInfo() return ns } func createNodesSetupTwoShard6NodesMeta() *sharding.NodesSetup { + noOfInitialNodes := 6 ns := &sharding.NodesSetup{} ns.ConsensusGroupSize = 1 ns.MinNodesPerShard = 2 ns.MetaChainMinNodes = 2 ns.MetaChainConsensusGroupSize = 2 - ns.InitialNodes = make([]*sharding.InitialNode, 6) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} - ns.InitialNodes[2] = &sharding.InitialNode{} - ns.InitialNodes[3] = &sharding.InitialNode{} - ns.InitialNodes[4] = &sharding.InitialNode{} - ns.InitialNodes[5] = &sharding.InitialNode{} - - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" - ns.InitialNodes[2].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417" - ns.InitialNodes[3].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7416" - ns.InitialNodes[4].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7411" - ns.InitialNodes[5].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7410" + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) + + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() if err != nil { @@ -133,7 +130,7 @@ func createNodesSetupTwoShard6NodesMeta() *sharding.NodesSetup { ns.ProcessMetaChainAssigment() ns.ProcessShardAssignment() - ns.CreateInitialNodesPubKeys() + ns.CreateInitialNodesInfo() return ns } @@ -170,20 +167,23 @@ func TestNodesSetup_NewNodesShouldTrimInitialNodesList(t *testing.T) { func TestNodesSetup_InitialNodesPubKeysFromNil(t *testing.T) { ns := sharding.NodesSetup{} - inPubKeys := ns.InitialNodesPubKeys() + inPubKeys := ns.InitialNodesInfo() assert.NotNil(t, ns) assert.Nil(t, inPubKeys) } func TestNodesSetup_ProcessConfigNodesWithIncompleteDataShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{} - ns.InitialNodes = make([]*sharding.InitialNode, 2) + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) + ns.InitialNodes[0] = &sharding.InitialNode{} 
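+	// only the first node is given a public key and address below; the second entry is
+	// left empty, which is the incomplete data this test expects ProcessConfig to reject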
ns.InitialNodes[1] = &sharding.InitialNode{} - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" + ns.InitialNodes[0].PubKey = PubKeys[0] + ns.InitialNodes[0].Address = Address[0] err := ns.ProcessConfig() @@ -192,17 +192,19 @@ func TestNodesSetup_ProcessConfigNodesWithIncompleteDataShouldErr(t *testing.T) } func TestNodesSetup_ProcessConfigInvalidConsensusGroupSizeShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{ ConsensusGroupSize: 0, MinNodesPerShard: 0, } - ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -211,6 +213,7 @@ func TestNodesSetup_ProcessConfigInvalidConsensusGroupSizeShouldErr(t *testing.T } func TestNodesSetup_ProcessConfigInvalidMetaConsensusGroupSizeShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{ ConsensusGroupSize: 1, MinNodesPerShard: 1, @@ -218,12 +221,13 @@ func TestNodesSetup_ProcessConfigInvalidMetaConsensusGroupSizeShouldErr(t *testi MetaChainMinNodes: 0, } - ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -232,17 +236,19 @@ func TestNodesSetup_ProcessConfigInvalidMetaConsensusGroupSizeShouldErr(t *testi } func TestNodesSetup_ProcessConfigInvalidConsensusGroupSizeLargerThanNumOfNodesShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{ ConsensusGroupSize: 2, MinNodesPerShard: 0, } - ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -251,6 +257,7 @@ func TestNodesSetup_ProcessConfigInvalidConsensusGroupSizeLargerThanNumOfNodesSh } func TestNodesSetup_ProcessConfigInvalidMetaConsensusGroupSizeLargerThanNumOfNodesShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{ ConsensusGroupSize: 1, MinNodesPerShard: 1, @@ -259,11 +266,12 @@ func TestNodesSetup_ProcessConfigInvalidMetaConsensusGroupSizeLargerThanNumOfNod } ns.InitialNodes = make([]*sharding.InitialNode, 2) - 
ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -272,17 +280,19 @@ func TestNodesSetup_ProcessConfigInvalidMetaConsensusGroupSizeLargerThanNumOfNod } func TestNodesSetup_ProcessConfigInvalidMinNodesPerShardShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{ ConsensusGroupSize: 2, MinNodesPerShard: 0, } - ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -291,6 +301,7 @@ func TestNodesSetup_ProcessConfigInvalidMinNodesPerShardShouldErr(t *testing.T) } func TestNodesSetup_ProcessConfigInvalidMetaMinNodesPerShardShouldErr(t *testing.T) { + noOfInitialNodes := 1 ns := sharding.NodesSetup{ ConsensusGroupSize: 1, MinNodesPerShard: 1, @@ -298,12 +309,13 @@ func TestNodesSetup_ProcessConfigInvalidMetaMinNodesPerShardShouldErr(t *testing MetaChainMinNodes: 0, } - ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -312,17 +324,19 @@ func TestNodesSetup_ProcessConfigInvalidMetaMinNodesPerShardShouldErr(t *testing } func TestNodesSetup_ProcessConfigInvalidNumOfNodesSmallerThanMinNodesPerShardShouldErr(t *testing.T) { + noOfInitialNodes := 2 ns := sharding.NodesSetup{ ConsensusGroupSize: 2, MinNodesPerShard: 3, } - ns.InitialNodes = make([]*sharding.InitialNode, 2) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -331,6 +345,7 @@ func TestNodesSetup_ProcessConfigInvalidNumOfNodesSmallerThanMinNodesPerShardSho } func TestNodesSetup_ProcessConfigInvalidMetaNumOfNodesSmallerThanMinNodesPerShardShouldErr(t *testing.T) { + noOfInitialNodes := 3 
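+	// presumably 3 initial nodes cannot cover MinNodesPerShard (1) plus
+	// MetaChainMinNodes (3), so ProcessConfig is expected to fail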
ns := sharding.NodesSetup{ ConsensusGroupSize: 1, MinNodesPerShard: 1, @@ -339,14 +354,13 @@ func TestNodesSetup_ProcessConfigInvalidMetaNumOfNodesSmallerThanMinNodesPerShar MetaChainMinNodes: 3, } - ns.InitialNodes = make([]*sharding.InitialNode, 3) - ns.InitialNodes[0] = &sharding.InitialNode{} - ns.InitialNodes[1] = &sharding.InitialNode{} - ns.InitialNodes[2] = &sharding.InitialNode{} + ns.InitialNodes = make([]*sharding.InitialNode, noOfInitialNodes) - ns.InitialNodes[0].PubKey = "5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7419" - ns.InitialNodes[1].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418" - ns.InitialNodes[2].PubKey = "3336b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417" + for i := 0; i < noOfInitialNodes; i++ { + ns.InitialNodes[i] = &sharding.InitialNode{} + ns.InitialNodes[i].PubKey = PubKeys[i] + ns.InitialNodes[i].Address = Address[i] + } err := ns.ProcessConfig() @@ -356,7 +370,7 @@ func TestNodesSetup_ProcessConfigInvalidMetaNumOfNodesSmallerThanMinNodesPerShar func TestNodesSetup_InitialNodesPubKeysForShardNil(t *testing.T) { ns := sharding.NodesSetup{} - inPK, err := ns.InitialNodesPubKeysForShard(0) + inPK, err := ns.InitialNodesInfoForShard(0) assert.NotNil(t, ns) assert.Nil(t, inPK) @@ -365,7 +379,7 @@ func TestNodesSetup_InitialNodesPubKeysForShardNil(t *testing.T) { func TestNodesSetup_InitialNodesPubKeysForShardWrongShard(t *testing.T) { ns := createNodesSetupOneShardOneNodeWithOneMeta() - inPK, err := ns.InitialNodesPubKeysForShard(1) + inPK, err := ns.InitialNodesInfoForShard(1) assert.NotNil(t, ns) assert.Nil(t, inPK) @@ -374,27 +388,27 @@ func TestNodesSetup_InitialNodesPubKeysForShardWrongShard(t *testing.T) { func TestNodesSetup_InitialNodesPubKeysForShardGood(t *testing.T) { ns := createNodesSetupTwoShardTwoNodesWithOneMeta() - inPK, err := ns.InitialNodesPubKeysForShard(1) + inPK, err := ns.InitialNodesInfoForShard(1) assert.NotNil(t, ns) - assert.Equal(t, len(inPK), 2) + assert.Equal(t, 2, len(inPK)) assert.Nil(t, err) } func TestNodesSetup_InitialNodesPubKeysForShardGoodMeta(t *testing.T) { ns := createNodesSetupTwoShard6NodesMeta() metaId := sharding.MetachainShardId - inPK, err := ns.InitialNodesPubKeysForShard(metaId) + inPK, err := ns.InitialNodesInfoForShard(metaId) assert.NotNil(t, ns) - assert.Equal(t, len(inPK), 2) + assert.Equal(t, 2, len(inPK)) assert.Nil(t, err) } func TestNodesSetup_PublicKeyNotGood(t *testing.T) { ns := createNodesSetupTwoShard6NodesMeta() - _, err := ns.GetShardIDForPubKey([]byte("5126b6505a73e59a994caa8f956f8c335d4399229de42102bb4814ca261c7419")) + _, err := ns.GetShardIDForPubKey([]byte(PubKeys[0])) assert.NotNil(t, ns) assert.NotNil(t, err) @@ -402,18 +416,18 @@ func TestNodesSetup_PublicKeyNotGood(t *testing.T) { func TestNodesSetup_PublicKeyGood(t *testing.T) { ns := createNodesSetupTwoShard5NodesWithMeta() - publicKey, err := hex.DecodeString("5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417") + publicKey, err := hex.DecodeString(PubKeys[2]) selfId, err := ns.GetShardIDForPubKey(publicKey) assert.NotNil(t, ns) assert.Nil(t, err) - assert.Equal(t, uint32(1), selfId) + assert.Equal(t, uint32(0), selfId) } func TestNodesSetup_ShardPublicKeyGoodMeta(t *testing.T) { ns := createNodesSetupTwoShard6NodesMeta() - publicKey, err := hex.DecodeString("5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7417") + publicKey, err := hex.DecodeString(PubKeys[2]) selfId, err := ns.GetShardIDForPubKey(publicKey) @@ -425,7 +439,7 @@ func 
TestNodesSetup_ShardPublicKeyGoodMeta(t *testing.T) { func TestNodesSetup_MetaPublicKeyGoodMeta(t *testing.T) { ns := createNodesSetupTwoShard6NodesMeta() metaId := sharding.MetachainShardId - publicKey, err := hex.DecodeString("5126b6505a73e59a994caa8f556f8c335d4399229de42102bb4814ca261c7418") + publicKey, err := hex.DecodeString(PubKeys[0]) selfId, err := ns.GetShardIDForPubKey(publicKey) diff --git a/sharding/sharding.go b/sharding/sharding.go deleted file mode 100644 index 50712d0ab8f..00000000000 --- a/sharding/sharding.go +++ /dev/null @@ -1,18 +0,0 @@ -package sharding - -import ( - "github.com/ElrondNetwork/elrond-go/data/state" -) - -// MetachainShardId will be used to identify a shard ID as metachain -const MetachainShardId = uint32(0xFFFFFFFF) - -// Coordinator defines what a shard state coordinator should hold -type Coordinator interface { - NumberOfShards() uint32 - ComputeId(address state.AddressContainer) uint32 - SelfId() uint32 - SameShard(firstAddress, secondAddress state.AddressContainer) bool - CommunicationIdentifier(destShardID uint32) string - IsInterfaceNil() bool -} diff --git a/consensus/validators/validator.go b/sharding/validator.go similarity index 64% rename from consensus/validators/validator.go rename to sharding/validator.go index fe80d2c7645..65a6f44d38c 100644 --- a/consensus/validators/validator.go +++ b/sharding/validator.go @@ -1,17 +1,18 @@ -package validators +package sharding import ( "math/big" ) type validator struct { - stake *big.Int - rating int32 - pubKey []byte + stake *big.Int + rating int32 + pubKey []byte + address []byte } // NewValidator creates a new instance of a validator -func NewValidator(stake *big.Int, rating int32, pubKey []byte) (*validator, error) { +func NewValidator(stake *big.Int, rating int32, pubKey []byte, address []byte) (*validator, error) { if stake == nil { return nil, ErrNilStake } @@ -24,10 +25,15 @@ func NewValidator(stake *big.Int, rating int32, pubKey []byte) (*validator, erro return nil, ErrNilPubKey } + if address == nil { + return nil, ErrNilAddress + } + return &validator{ - stake: stake, - rating: rating, - pubKey: pubKey, + stake: stake, + rating: rating, + pubKey: pubKey, + address: address, }, nil } @@ -46,6 +52,11 @@ func (v *validator) PubKey() []byte { return v.pubKey } +// Address returns the validator's address +func (v *validator) Address() []byte { + return v.address +} + // IsInterfaceNil returns true if there is no value under the interface func (v *validator) IsInterfaceNil() bool { if v == nil { diff --git a/sharding/validator_test.go b/sharding/validator_test.go new file mode 100644 index 00000000000..c0f3953e005 --- /dev/null +++ b/sharding/validator_test.go @@ -0,0 +1,78 @@ +package sharding_test + +import ( + "math/big" + "testing" + + "github.com/ElrondNetwork/elrond-go/sharding" + "github.com/stretchr/testify/assert" +) + +func TestValidator_NewValidatorShouldFailOnNilStake(t *testing.T) { + t.Parallel() + + validator, err := sharding.NewValidator(nil, 0, []byte("pk1"), []byte("addr1")) + + assert.Nil(t, validator) + assert.Equal(t, sharding.ErrNilStake, err) +} + +func TestValidator_NewValidatorShouldFailOnNegativeStake(t *testing.T) { + t.Parallel() + + validator, err := sharding.NewValidator(big.NewInt(-1), 0, []byte("pk1"), []byte("addr1")) + + assert.Nil(t, validator) + assert.Equal(t, sharding.ErrNegativeStake, err) +} + +func TestValidator_NewValidatorShouldFailOnNilPublickKey(t *testing.T) { + t.Parallel() + + validator, err := sharding.NewValidator(big.NewInt(0), 0, nil, 
[]byte("addr1")) + + assert.Nil(t, validator) + assert.Equal(t, sharding.ErrNilPubKey, err) +} + +func TestValidator_NewValidatorShouldFailOnNilAddress(t *testing.T) { + t.Parallel() + + validator, err := sharding.NewValidator(big.NewInt(0), 0, []byte("pk1"), nil) + + assert.Nil(t, validator) + assert.Equal(t, sharding.ErrNilAddress, err) +} + +func TestValidator_NewValidatorShouldWork(t *testing.T) { + t.Parallel() + + validator, err := sharding.NewValidator(big.NewInt(0), 0, []byte("pk1"), []byte("addr1")) + + assert.NotNil(t, validator) + assert.Nil(t, err) +} + +func TestValidator_StakeShouldWork(t *testing.T) { + t.Parallel() + + validator, _ := sharding.NewValidator(big.NewInt(1), 0, []byte("pk1"), []byte("addr1")) + + assert.Equal(t, big.NewInt(1), validator.Stake()) +} + +func TestValidator_PubKeyShouldWork(t *testing.T) { + t.Parallel() + + validator, _ := sharding.NewValidator(big.NewInt(0), 0, []byte("pk1"), []byte("addr1")) + + assert.Equal(t, []byte("pk1"), validator.PubKey()) +} + +func TestValidator_AddressShouldWork(t *testing.T) { + t.Parallel() + + validator, _ := sharding.NewValidator(big.NewInt(0), 0, []byte("pk1"), []byte("addr1")) + + assert.Equal(t, []byte("addr1"), validator.Address()) +} diff --git a/statusHandler/presenter/blockInfoGetters.go b/statusHandler/presenter/blockInfoGetters.go index f1ef411b034..aa6adcd9d8b 100644 --- a/statusHandler/presenter/blockInfoGetters.go +++ b/statusHandler/presenter/blockInfoGetters.go @@ -44,3 +44,8 @@ func (psh *PresenterStatusHandler) GetBlockSize() uint64 { return miniBlocksSize + headerSize } + +// GetHighestFinalBlockInShard will return highest nonce block notarized by metachain for current shard +func (psh *PresenterStatusHandler) GetHighestFinalBlockInShard() uint64 { + return psh.getFromCacheAsUint64(core.MetricHighestFinalBlockInShard) +} diff --git a/statusHandler/presenter/blockInfoGetters_test.go b/statusHandler/presenter/blockInfoGetters_test.go index 57fca23dfbb..38a3f2816ac 100644 --- a/statusHandler/presenter/blockInfoGetters_test.go +++ b/statusHandler/presenter/blockInfoGetters_test.go @@ -138,3 +138,14 @@ func TestPresenterStatusHandler_GetBlockSize(t *testing.T) { blockExpectedSize := miniBlocksSize + headerSize assert.Equal(t, blockExpectedSize, result) } + +func TestPresenterStatusHandler_GetHighestFinalBlockInShard(t *testing.T) { + t.Parallel() + + highestFinalBlockNonce := uint64(100) + presenterStatusHandler := NewPresenterStatusHandler() + presenterStatusHandler.SetUInt64Value(core.MetricHighestFinalBlockInShard, highestFinalBlockNonce) + result := presenterStatusHandler.GetHighestFinalBlockInShard() + + assert.Equal(t, highestFinalBlockNonce, result) +} diff --git a/statusHandler/view/interface.go b/statusHandler/view/interface.go index a8dc0d965a7..76f27235391 100644 --- a/statusHandler/view/interface.go +++ b/statusHandler/view/interface.go @@ -44,6 +44,7 @@ type Presenter interface { GetBlockSize() uint64 GetNumShardHeadersInPool() uint64 GetNumShardHeadersProcessed() uint64 + GetHighestFinalBlockInShard() uint64 // IsInterfaceNil returns true if there is no value under the interface IsInterfaceNil() bool diff --git a/statusHandler/view/termuic/termuiConsole.go b/statusHandler/view/termuic/termuiConsole.go index 383a60590b2..c08a03b7b41 100644 --- a/statusHandler/view/termuic/termuiConsole.go +++ b/statusHandler/view/termuic/termuiConsole.go @@ -3,6 +3,8 @@ package termuic import ( "os" "os/signal" + "sync" + "sync/atomic" "syscall" "time" @@ -16,6 +18,10 @@ import ( //refreshInterval is used 
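The file moved to sharding/validator.go extends the constructor with an address parameter and a corresponding ErrNilAddress check, plus an Address() getter. A short usage sketch based only on the signature and errors visible in the diff:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ElrondNetwork/elrond-go/sharding"
)

func main() {
	// The constructor now takes the validator's address alongside its stake,
	// rating and public key; a nil address is rejected with ErrNilAddress.
	v, err := sharding.NewValidator(big.NewInt(100), 0, []byte("pk1"), []byte("addr1"))
	if err != nil {
		// one of ErrNilStake, ErrNegativeStake, ErrNilPubKey, ErrNilAddress
		fmt.Println(err)
		return
	}

	fmt.Println(v.Stake(), string(v.PubKey()), string(v.Address()))
}
```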
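The new GetHighestFinalBlockInShard getter simply reads the metachain-notarized final nonce back out of the presenter's uint64 metric cache, as the added test shows. A sketch of that round trip, assuming the usual github.com/ElrondNetwork/elrond-go/core import path for the metric constant:

```go
package presenter

import (
	"fmt"

	"github.com/ElrondNetwork/elrond-go/core"
)

// Sketch of the round trip exercised by the new test: the generic uint64
// setter stores the metric, and the new getter reads it back for the widget.
func ExampleGetHighestFinalBlockInShard() {
	psh := NewPresenterStatusHandler()
	psh.SetUInt64Value(core.MetricHighestFinalBlockInShard, 100)

	fmt.Println(psh.GetHighestFinalBlockInShard())
	// Output: 100
}
```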
for a ticker that refresh termui console at a specific interval const refreshInterval = time.Second +// numOfTicksBeforeRedrawing represents the number of ticks which have to pass until a fake resize will be made +// in order to clean the unwanted appeared characters +const numOfTicksBeforeRedrawing = 10 + var log = logger.DefaultLogger() // TermuiConsole data where is store data from handler @@ -23,6 +29,7 @@ type TermuiConsole struct { presenter view.Presenter consoleRender TermuiRender grid *termuiRenders.DrawableContainer + mutRefresh *sync.RWMutex } //NewTermuiConsole method is used to return a new TermuiConsole structure @@ -32,7 +39,8 @@ func NewTermuiConsole(presenter view.Presenter) (*TermuiConsole, error) { } tc := TermuiConsole{ - presenter: presenter, + presenter: presenter, + mutRefresh: &sync.RWMutex{}, } return &tc, nil @@ -75,14 +83,12 @@ func (tc *TermuiConsole) eventLoop() { signal.Notify(sigTerm, os.Interrupt, syscall.SIGTERM) tc.consoleRender.RefreshData() + ticksCounter := uint32(0) for { select { case <-time.After(refreshInterval): - tc.consoleRender.RefreshData() - ui.Clear() - ui.Render(tc.grid.TopLeft(), tc.grid.TopRight(), tc.grid.Bottom()) - + tc.doChanges(&ticksCounter) case <-sigTerm: ui.Clear() return @@ -95,14 +101,39 @@ func (tc *TermuiConsole) eventLoop() { func (tc *TermuiConsole) processUiEvents(e ui.Event) { switch e.ID { case "": - payload := e.Payload.(ui.Resize) - tc.grid.SetRectangle(0, 0, payload.Width, payload.Height) - ui.Clear() - ui.Render(tc.grid.TopLeft(), tc.grid.TopRight(), tc.grid.Bottom()) - + tc.doResizeEvent(e) case "": ui.Close() StopApplication() return } } + +func (tc *TermuiConsole) doChanges(counter *uint32) { + atomic.AddUint32(counter, 1) + if atomic.LoadUint32(counter) > numOfTicksBeforeRedrawing { + tc.doResize(ui.TerminalDimensions()) + atomic.StoreUint32(counter, 0) + } else { + tc.refreshWindow() + } +} + +func (tc *TermuiConsole) doResizeEvent(e ui.Event) { + payload := e.Payload.(ui.Resize) + tc.doResize(payload.Width, payload.Height) +} + +func (tc *TermuiConsole) doResize(width int, height int) { + tc.grid.SetRectangle(0, 0, width, height) + tc.refreshWindow() +} + +func (tc *TermuiConsole) refreshWindow() { + tc.mutRefresh.Lock() + defer tc.mutRefresh.Unlock() + + tc.consoleRender.RefreshData() + ui.Clear() + ui.Render(tc.grid.TopLeft(), tc.grid.TopRight(), tc.grid.Bottom()) +} diff --git a/statusHandler/view/termuic/termuiRenders/widgetsRender.go b/statusHandler/view/termuic/termuiRenders/widgetsRender.go index 4151f6d3a5a..78406f4a299 100644 --- a/statusHandler/view/termuic/termuiRenders/widgetsRender.go +++ b/statusHandler/view/termuic/termuiRenders/widgetsRender.go @@ -239,6 +239,12 @@ func (wr *WidgetsRender) prepareBlockInfo() { crossCheckBlockHeight := wr.presenter.GetCrossCheckBlockHeight() rows[4] = []string{fmt.Sprintf("Cross check block height: %s", crossCheckBlockHeight)} + shardId := wr.presenter.GetShardId() + if shardId != uint64(sharding.MetachainShardId) { + highestFinalBlockInShard := wr.presenter.GetHighestFinalBlockInShard() + rows[4][0] += fmt.Sprintf(", highest final block nonce in shard: %d", highestFinalBlockInShard) + } + consensusState := wr.presenter.GetConsensusState() rows[5] = []string{fmt.Sprintf("Consensus state: %s", consensusState)} @@ -275,8 +281,13 @@ func (wr *WidgetsRender) prepareListWithLogsForDisplay() { func (wr *WidgetsRender) prepareLogLines(logData []string, size int) []string { logDataLen := len(logData) - if logDataLen > size { - return logData[logDataLen-size : logDataLen] 
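The reworked termui event loop counts refresh ticks and, once numOfTicksBeforeRedrawing ticks have elapsed, forces a fake resize so stray characters are cleared; otherwise it only refreshes the data. An isolated illustration of that throttling pattern follows; the names and timings are illustrative and not part of the repository's API.

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// numOfTicksBeforeRedrawing mirrors the constant added to termuiConsole.go.
const numOfTicksBeforeRedrawing = 10

func main() {
	var ticks uint32
	ticker := time.NewTicker(10 * time.Millisecond) // stands in for the 1s refresh interval
	defer ticker.Stop()

	for i := 0; i < 25; i++ {
		<-ticker.C
		if atomic.AddUint32(&ticks, 1) > numOfTicksBeforeRedrawing {
			fmt.Println("forced full redraw (fake resize)")
			atomic.StoreUint32(&ticks, 0)
		} else {
			fmt.Println("refresh data only")
		}
	}
}
```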
+ maxSize := size - 2 // decrease 2 units as the total size of the log list includes also the header and the footer + if maxSize <= 0 { + return []string{} // there isn't place for any log line + } + + if logDataLen > maxSize { + return logData[(logDataLen - maxSize):] } return logData diff --git a/storage/interface.go b/storage/interface.go index 6ae9a52618e..4fb8069d1b2 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -83,7 +83,6 @@ type Storer interface { Put(key, data []byte) error Get(key []byte) ([]byte, error) Has(key []byte) error - HasOrAdd(key []byte, value []byte) error Remove(key []byte) error ClearCache() DestroyUnit() error diff --git a/storage/storageUnit/storageunit.go b/storage/storageUnit/storageunit.go index 51d926d1c71..571676e8d0c 100644 --- a/storage/storageUnit/storageunit.go +++ b/storage/storageUnit/storageunit.go @@ -97,12 +97,6 @@ func (s *Unit) Put(key, data []byte) error { s.lock.Lock() defer s.lock.Unlock() - // no need to add if already present in cache - has := s.cacher.Has(key) - if has { - return nil - } - s.cacher.Put(key, data) err := s.persister.Put(key, data) @@ -168,48 +162,6 @@ func (s *Unit) Has(key []byte) error { return storage.ErrKeyNotFound } -// HasOrAdd checks if the key is present in the storage and if not adds it. -// it updates the cache either way -// it returns if the value was originally found -func (s *Unit) HasOrAdd(key []byte, value []byte) error { - s.lock.Lock() - defer s.lock.Unlock() - - has := s.cacher.Has(key) - if has { - return nil - } - - if s.bloomFilter == nil || s.bloomFilter.MayContain(key) == true { - err := s.persister.Has(key) - if err != nil { - //add it to the cache - s.cacher.Put(key, value) - - // add it also to the persistence unit - err = s.persister.Put(key, value) - if err != nil { - //revert adding to the cache - s.cacher.Remove(key) - } - } - - return err - } - - s.cacher.Put(key, value) - - err := s.persister.Put(key, value) - if err != nil { - s.cacher.Remove(key) - return err - } - - s.bloomFilter.Add(key) - - return nil -} - // Remove removes the data associated to the given key from both cache and persistence medium func (s *Unit) Remove(key []byte) error { s.lock.Lock() diff --git a/storage/storageUnit/storageunit_test.go b/storage/storageUnit/storageunit_test.go index ceb63278697..14b7a460041 100644 --- a/storage/storageUnit/storageunit_test.go +++ b/storage/storageUnit/storageunit_test.go @@ -159,17 +159,20 @@ func TestPutNotPresentCacheWithNilBloomFilter(t *testing.T) { assert.Nil(t, err, "expected to find key %s, but not found", key) } -func TestPutPresent(t *testing.T) { +func TestPutPresentShouldOverwriteValue(t *testing.T) { key, val := []byte("key2"), []byte("value2") s := initStorageUnitWithBloomFilter(t, 10) err := s.Put(key, val) assert.Nil(t, err, "no error expected but got %s", err) - // put again same value, no error expected - err = s.Put(key, val) - + newVal := []byte("value5") + err = s.Put(key, newVal) assert.Nil(t, err, "no error expected but got %s", err) + + returnedVal, err := s.Get(key) + assert.Nil(t, err) + assert.Equal(t, newVal, returnedVal) } func TestPutPresentWithNilBloomFilter(t *testing.T) { @@ -327,72 +330,6 @@ func TestHasPresentWithNilBloomFilter(t *testing.T) { assert.Nil(t, err, "expected no error, but got %s", err) } -func TestHasOrAddNotPresent(t *testing.T) { - key, val := []byte("key9"), []byte("value9") - s := initStorageUnitWithBloomFilter(t, 10) - err := s.HasOrAdd(key, val) - - assert.Nil(t, err) - err = s.Has(key) - - assert.Nil(t, err, 
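With the cache short-circuit dropped from Unit.Put and HasOrAdd removed from the Storer interface, a repeated Put now overwrites the stored value, which is exactly what the renamed TestPutPresentShouldOverwriteValue asserts. A sketch of that contract written against the storage.Storer interface (constructing a concrete unit with a cacher and persister is left out here):

```go
package example

import (
	"bytes"
	"fmt"

	"github.com/ElrondNetwork/elrond-go/storage"
)

// putOverwrites checks the revised Put contract on any storage.Storer:
// writing the same key twice replaces the value instead of being ignored.
func putOverwrites(s storage.Storer) error {
	if err := s.Put([]byte("key2"), []byte("value2")); err != nil {
		return err
	}
	if err := s.Put([]byte("key2"), []byte("value5")); err != nil {
		return err
	}

	val, err := s.Get([]byte("key2"))
	if err != nil {
		return err
	}
	if !bytes.Equal(val, []byte("value5")) {
		return fmt.Errorf("expected overwritten value, got %s", val)
	}
	return nil
}
```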
"expected no error, but got %s", err) -} - -func TestHasOrAddNotPresentWithNilBloomFilter(t *testing.T) { - key, val := []byte("key9"), []byte("value9") - s := initStorageUnitWithNilBloomFilter(t, 10) - err := s.HasOrAdd(key, val) - - assert.Nil(t, err) - err = s.Has(key) - - assert.Nil(t, err, "expected no error, but got %s", err) -} - -func TestHasOrAddNotPresentCache(t *testing.T) { - key, val := []byte("key10"), []byte("value10") - s := initStorageUnitWithBloomFilter(t, 10) - err := s.Put(key, val) - - s.ClearCache() - - err = s.HasOrAdd(key, val) - - assert.Nil(t, err, "expected no error, but got %s", err) -} - -func TestHasOrAddNotPresentCacheWithNilBloomFilter(t *testing.T) { - key, val := []byte("key10"), []byte("value10") - s := initStorageUnitWithNilBloomFilter(t, 10) - err := s.Put(key, val) - - s.ClearCache() - - err = s.HasOrAdd(key, val) - - assert.Nil(t, err, "expected no error, but got %s", err) -} - -func TestHasOrAddPresent(t *testing.T) { - key, val := []byte("key11"), []byte("value11") - s := initStorageUnitWithBloomFilter(t, 10) - _ = s.Put(key, val) - - err := s.HasOrAdd(key, val) - - assert.Nil(t, err, "expected no error, but got %s", err) -} - -func TestHasOrAddPresentWithNilBloomFilter(t *testing.T) { - key, val := []byte("key11"), []byte("value11") - s := initStorageUnitWithNilBloomFilter(t, 10) - _ = s.Put(key, val) - - err := s.HasOrAdd(key, val) - - assert.Nil(t, err, "expected no error, but got %s", err) -} - func TestDeleteNotPresent(t *testing.T) { key := []byte("key12") s := initStorageUnitWithBloomFilter(t, 10)