From 7382b7aec0f14e8b98c95605e2d1e538148a00f5 Mon Sep 17 00:00:00 2001 From: iulianpascalau Date: Thu, 26 Dec 2019 10:20:03 +0200 Subject: [PATCH] fixes after review: method and variables renaming --- cmd/node/factory/structs.go | 24 ++++----- .../p2p/antiflood/antiflooding_test.go | 26 +++++----- p2p/antiflood/p2pAntiflood.go | 4 +- p2p/antiflood/p2pAntiflood_test.go | 10 ++-- p2p/mock/floodPreventerStub.go | 14 ++--- p2p/p2p.go | 4 +- process/interface.go | 4 +- .../throttle/antiflood/quotaFloodPreventer.go | 23 ++++---- .../antiflood/quotaFloodPreventer_test.go | 52 +++++++++---------- statusHandler/p2pQuota/p2pQuotaProcessor.go | 2 +- .../p2pQuota/p2pQuotaProcessor_test.go | 18 +++---- 11 files changed, 90 insertions(+), 91 deletions(-) diff --git a/cmd/node/factory/structs.go b/cmd/node/factory/structs.go index d7957b27284..7b12db736b2 100644 --- a/cmd/node/factory/structs.go +++ b/cmd/node/factory/structs.go @@ -482,16 +482,16 @@ func createAntifloodComponent(mainConfig *config.Config, status core.AppStatusHa return nil, err } - maxMessagesPerPeer := mainConfig.Antiflood.PeerMaxMessagesPerSecond - maxTotalSizePerPeer := mainConfig.Antiflood.PeerMaxTotalSizePerSecond - maxMessages := mainConfig.Antiflood.MaxMessagesPerSecond - maxTotalSize := mainConfig.Antiflood.MaxTotalSizePerSecond + peerMaxMessagesPerSecond := mainConfig.Antiflood.PeerMaxMessagesPerSecond + peerMaxTotalSizePerSecond := mainConfig.Antiflood.PeerMaxTotalSizePerSecond + maxMessagesPerSecond := mainConfig.Antiflood.MaxMessagesPerSecond + maxTotalSizePerSecond := mainConfig.Antiflood.MaxTotalSizePerSecond log.Debug("started antiflood component", - "maxMessagesPerPeer", maxMessagesPerPeer, - "maxTotalSizePerPeer", core.ConvertBytes(maxTotalSizePerPeer), - "maxMessages", maxMessages, - "maxTotalSize", core.ConvertBytes(maxTotalSize), + "peerMaxMessagesPerSecond", peerMaxMessagesPerSecond, + "peerMaxTotalSizePerSecond", core.ConvertBytes(peerMaxTotalSizePerSecond), + "maxMessagesPerSecond", maxMessagesPerSecond, + "maxTotalSizePerSecond", core.ConvertBytes(maxTotalSizePerSecond), ) quotaProcessor, err := p2pQuota.NewP2pQuotaProcessor(status) @@ -502,10 +502,10 @@ func createAntifloodComponent(mainConfig *config.Config, status core.AppStatusHa floodPreventer, err := antifloodThrottle.NewQuotaFloodPreventer( antifloodCache, quotaProcessor, - maxMessagesPerPeer, - maxTotalSizePerPeer, - maxMessages, - maxTotalSize, + peerMaxMessagesPerSecond, + peerMaxTotalSizePerSecond, + maxMessagesPerSecond, + maxTotalSizePerSecond, ) if err != nil { return nil, err diff --git a/integrationTests/p2p/antiflood/antiflooding_test.go b/integrationTests/p2p/antiflood/antiflooding_test.go index 07eaa281818..46aac60a95f 100644 --- a/integrationTests/p2p/antiflood/antiflooding_test.go +++ b/integrationTests/p2p/antiflood/antiflooding_test.go @@ -37,15 +37,15 @@ func TestAntifloodWithNumMessagesFromTheSamePeer(t *testing.T) { topic := "test_topic" broadcastMessageDuration := time.Second * 2 - peerMaxMumProcessMessages := uint32(5) - maxMumProcessMessages := uint32(math.MaxUint32) + peerMaxNumProcessMessages := uint32(5) + maxNumProcessMessages := uint32(math.MaxUint32) maxMessageSize := uint64(1 << 20) //1MB interceptors, err := createTopicsAndMockInterceptors( peers, topic, - peerMaxMumProcessMessages, + peerMaxNumProcessMessages, maxMessageSize, - maxMumProcessMessages, + maxNumProcessMessages, maxMessageSize, ) assert.Nil(t, err) @@ -76,7 +76,7 @@ func TestAntifloodWithNumMessagesFromTheSamePeer(t *testing.T) { isFlooding.Store(false) - 
checkMessagesOnPeers(t, peers, interceptors, peerMaxMumProcessMessages, floodedIdxes, protectedIdexes) + checkMessagesOnPeers(t, peers, interceptors, peerMaxNumProcessMessages, floodedIdxes, protectedIdexes) } // TestAntifloodWithMessagesFromOtherPeers tests what happens if a peer decide to send a number of messages @@ -99,15 +99,15 @@ func TestAntifloodWithNumMessagesFromOtherPeers(t *testing.T) { // (check integrationTests.CreateFixedNetworkOf14Peers function) topic := "test_topic" broadcastMessageDuration := time.Second * 2 - peerMaxMumProcessMessages := uint32(5) - maxMumProcessMessages := uint32(math.MaxUint32) + peerMaxNumProcessMessages := uint32(5) + maxNumProcessMessages := uint32(math.MaxUint32) maxMessageSize := uint64(1 << 20) //1MB interceptors, err := createTopicsAndMockInterceptors( peers, topic, - peerMaxMumProcessMessages, + peerMaxNumProcessMessages, maxMessageSize, - maxMumProcessMessages, + maxNumProcessMessages, maxMessageSize, ) assert.Nil(t, err) @@ -131,7 +131,7 @@ func TestAntifloodWithNumMessagesFromOtherPeers(t *testing.T) { } time.Sleep(broadcastMessageDuration) - checkMessagesOnPeers(t, peers, interceptors, peerMaxMumProcessMessages, floodedIdxes, protectedIdexes) + checkMessagesOnPeers(t, peers, interceptors, peerMaxNumProcessMessages, floodedIdxes, protectedIdexes) } // TestAntifloodWithMessagesFromTheSamePeer tests what happens if a peer decide to send large messages @@ -154,15 +154,15 @@ func TestAntifloodWithLargeSizeMessagesFromTheSamePeer(t *testing.T) { topic := "test_topic" broadcastMessageDuration := time.Second * 2 - maxMumProcessMessages := uint32(math.MaxUint32) + maxNumProcessMessages := uint32(math.MaxUint32) maxMessageSize := uint64(math.MaxUint64) peerMaxMessageSize := uint64(1 << 10) //1KB interceptors, err := createTopicsAndMockInterceptors( peers, topic, - maxMumProcessMessages, + maxNumProcessMessages, peerMaxMessageSize, - maxMumProcessMessages, + maxNumProcessMessages, maxMessageSize, ) assert.Nil(t, err) diff --git a/p2p/antiflood/p2pAntiflood.go b/p2p/antiflood/p2pAntiflood.go index dd8bc75412e..4eabaabbd96 100644 --- a/p2p/antiflood/p2pAntiflood.go +++ b/p2p/antiflood/p2pAntiflood.go @@ -34,14 +34,14 @@ func (af *p2pAntiflood) CanProcessMessage(message p2p.MessageP2P, fromConnectedP } //protect from directly connected peer - ok := floodPreventer.IncrementAddingToSum(fromConnectedPeer.Pretty(), uint64(len(message.Data()))) + ok := floodPreventer.AccumulateGlobal(fromConnectedPeer.Pretty(), uint64(len(message.Data()))) if !ok { return fmt.Errorf("%w in p2pAntiflood for connected peer", p2p.ErrSystemBusy) } if fromConnectedPeer != message.Peer() { //protect from the flooding messages that originate from the same source but come from different peers - ok = floodPreventer.Increment(message.Peer().Pretty(), uint64(len(message.Data()))) + ok = floodPreventer.Accumulate(message.Peer().Pretty(), uint64(len(message.Data()))) if !ok { return fmt.Errorf("%w in p2pAntiflood for originator", p2p.ErrSystemBusy) } diff --git a/p2p/antiflood/p2pAntiflood_test.go b/p2p/antiflood/p2pAntiflood_test.go index 93564438201..40b04b07319 100644 --- a/p2p/antiflood/p2pAntiflood_test.go +++ b/p2p/antiflood/p2pAntiflood_test.go @@ -68,7 +68,7 @@ func TestP2pAntiflood_CanNotIncrementFromConnectedPeerShouldError(t *testing.T) FromField: messageOriginator, } afm, _ := antiflood.NewP2pAntiflood(&mock.FloodPreventerStub{ - IncrementAddingToSumCalled: func(identifier string, size uint64) bool { + AccumulateGlobalCalled: func(identifier string, size uint64) bool { if 
identifier != fromConnectedPeer.Pretty() { assert.Fail(t, "should have been the connected peer") } @@ -92,10 +92,10 @@ func TestP2pAntiflood_CanNotIncrementMessageOriginatorShouldError(t *testing.T) PeerField: p2p.PeerID(messageOriginator), } afm, _ := antiflood.NewP2pAntiflood(&mock.FloodPreventerStub{ - IncrementAddingToSumCalled: func(identifier string, size uint64) bool { + AccumulateGlobalCalled: func(identifier string, size uint64) bool { return identifier == fromConnectedPeer.Pretty() }, - IncrementCalled: func(identifier string, size uint64) bool { + AccumulateCalled: func(identifier string, size uint64) bool { return identifier != message.PeerField.Pretty() }, }) @@ -114,10 +114,10 @@ func TestP2pAntiflood_ShouldWork(t *testing.T) { PeerField: p2p.PeerID(messageOriginator), } afm, _ := antiflood.NewP2pAntiflood(&mock.FloodPreventerStub{ - IncrementAddingToSumCalled: func(identifier string, size uint64) bool { + AccumulateGlobalCalled: func(identifier string, size uint64) bool { return true }, - IncrementCalled: func(identifier string, size uint64) bool { + AccumulateCalled: func(identifier string, size uint64) bool { return true }, }) diff --git a/p2p/mock/floodPreventerStub.go b/p2p/mock/floodPreventerStub.go index 27278e1aa96..022f863927b 100644 --- a/p2p/mock/floodPreventerStub.go +++ b/p2p/mock/floodPreventerStub.go @@ -1,17 +1,17 @@ package mock type FloodPreventerStub struct { - IncrementAddingToSumCalled func(identifier string, size uint64) bool - IncrementCalled func(identifier string, size uint64) bool - ResetCalled func() + AccumulateGlobalCalled func(identifier string, size uint64) bool + AccumulateCalled func(identifier string, size uint64) bool + ResetCalled func() } -func (fps *FloodPreventerStub) IncrementAddingToSum(identifier string, size uint64) bool { - return fps.IncrementAddingToSumCalled(identifier, size) +func (fps *FloodPreventerStub) AccumulateGlobal(identifier string, size uint64) bool { + return fps.AccumulateGlobalCalled(identifier, size) } -func (fps *FloodPreventerStub) Increment(identifier string, size uint64) bool { - return fps.IncrementCalled(identifier, size) +func (fps *FloodPreventerStub) Accumulate(identifier string, size uint64) bool { + return fps.AccumulateCalled(identifier, size) } func (fps *FloodPreventerStub) Reset() { diff --git a/p2p/p2p.go b/p2p/p2p.go index 70ee40d007b..aba43118e9c 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -195,8 +195,8 @@ type PeerDiscoveryFactory interface { // FloodPreventer defines the behavior of a component that is able to signal that too many events occurred // on a provided identifier between Reset calls type FloodPreventer interface { - IncrementAddingToSum(identifier string, size uint64) bool - Increment(identifier string, size uint64) bool + AccumulateGlobal(identifier string, size uint64) bool + Accumulate(identifier string, size uint64) bool Reset() IsInterfaceNil() bool } diff --git a/process/interface.go b/process/interface.go index 120267bc0a5..681b098eb78 100644 --- a/process/interface.go +++ b/process/interface.go @@ -543,8 +543,8 @@ type InterceptedHeaderSigVerifier interface { // FloodPreventer defines the behavior of a component that is able to signal that too many events occurred // on a provided identifier between Reset calls type FloodPreventer interface { - IncrementAddingToSum(identifier string, size uint64) bool - Increment(identifier string, size uint64) bool + AccumulateGlobal(identifier string, size uint64) bool + Accumulate(identifier string, size uint64) bool Reset() 
IsInterfaceNil() bool } diff --git a/process/throttle/antiflood/quotaFloodPreventer.go b/process/throttle/antiflood/quotaFloodPreventer.go index 15aee8e7e4a..9c86260c318 100644 --- a/process/throttle/antiflood/quotaFloodPreventer.go +++ b/process/throttle/antiflood/quotaFloodPreventer.go @@ -89,40 +89,40 @@ func NewQuotaFloodPreventer( }, nil } -// IncrementAddingToSum tries to increment the counter values held at "identifier" position +// AccumulateGlobal tries to increment the counter values held at "identifier" position // It returns true if it had succeeded incrementing (existing counter value is lower or equal with provided maxOperations) // We need the mutOperation here as the get and put should be done atomically. // Otherwise we might yield a slightly higher number of false valid increments // This method also checks the global sum quota and increment its values -func (qfp *quotaFloodPreventer) IncrementAddingToSum(identifier string, size uint64) bool { +func (qfp *quotaFloodPreventer) AccumulateGlobal(identifier string, size uint64) bool { qfp.mutOperation.Lock() - defer qfp.mutOperation.Unlock() qfp.globalQuota.numReceivedMessages++ qfp.globalQuota.sizeReceivedMessages += size - result := qfp.increment(identifier, size) - if result { + isQuotaNotReached := qfp.accumulate(identifier, size) + if isQuotaNotReached { qfp.globalQuota.numProcessedMessages++ qfp.globalQuota.sizeProcessedMessages += size } + qfp.mutOperation.Unlock() - return result + return isQuotaNotReached } -// Increment tries to increment the counter values held at "identifier" position +// Accumulate tries to increment the counter values held at "identifier" position // It returns true if it had succeeded incrementing (existing counter value is lower or equal with provided maxOperations) // We need the mutOperation here as the get and put should be done atomically. 
// Otherwise we might yield a slightly higher number of false valid increments // This method also checks the global sum quota but does not increment its values -func (qfp *quotaFloodPreventer) Increment(identifier string, size uint64) bool { +func (qfp *quotaFloodPreventer) Accumulate(identifier string, size uint64) bool { qfp.mutOperation.Lock() defer qfp.mutOperation.Unlock() - return qfp.increment(identifier, size) + return qfp.accumulate(identifier, size) } -func (qfp *quotaFloodPreventer) increment(identifier string, size uint64) bool { +func (qfp *quotaFloodPreventer) accumulate(identifier string, size uint64) bool { isGlobalQuotaReached := qfp.globalQuota.numReceivedMessages > qfp.maxMessages || qfp.globalQuota.sizeReceivedMessages > qfp.maxSize if isGlobalQuotaReached { @@ -174,6 +174,7 @@ func (qfp *quotaFloodPreventer) Reset() { qfp.mutOperation.Lock() defer qfp.mutOperation.Unlock() + qfp.statusHandler.ResetStatistics() qfp.createStatistics() //TODO change this if cacher.Clear() is time consuming @@ -183,8 +184,6 @@ func (qfp *quotaFloodPreventer) Reset() { // createStatistics is useful to benchmark the system when running func (qfp quotaFloodPreventer) createStatistics() { - qfp.statusHandler.ResetStatistics() - keys := qfp.cacher.Keys() for _, k := range keys { val, ok := qfp.cacher.Get(k) diff --git a/process/throttle/antiflood/quotaFloodPreventer_test.go b/process/throttle/antiflood/quotaFloodPreventer_test.go index b7f5d31d652..8c6153c689a 100644 --- a/process/throttle/antiflood/quotaFloodPreventer_test.go +++ b/process/throttle/antiflood/quotaFloodPreventer_test.go @@ -134,9 +134,9 @@ func TestNewQuotaFloodPreventer_ShouldWork(t *testing.T) { assert.Nil(t, err) } -//------- Increment +//------- Accumulate -func TestNewQuotaFloodPreventer_IncrementIdentifierNotPresentPutQuotaAndReturnTrue(t *testing.T) { +func TestNewQuotaFloodPreventer_AccumulateIdentifierNotPresentPutQuotaAndReturnTrue(t *testing.T) { t.Parallel() putWasCalled := false @@ -165,13 +165,13 @@ func TestNewQuotaFloodPreventer_IncrementIdentifierNotPresentPutQuotaAndReturnTr minTotalSize*10, ) - ok := qfp.Increment("identifier", size) + ok := qfp.Accumulate("identifier", size) assert.True(t, ok) assert.True(t, putWasCalled) } -func TestNewQuotaFloodPreventer_IncrementNotQuotaSavedInCacheShouldPutQuotaAndReturnTrue(t *testing.T) { +func TestNewQuotaFloodPreventer_AccumulateNotQuotaSavedInCacheShouldPutQuotaAndReturnTrue(t *testing.T) { t.Parallel() putWasCalled := false @@ -200,13 +200,13 @@ func TestNewQuotaFloodPreventer_IncrementNotQuotaSavedInCacheShouldPutQuotaAndRe minTotalSize*10, ) - ok := qfp.Increment("identifier", size) + ok := qfp.Accumulate("identifier", size) assert.True(t, ok) assert.True(t, putWasCalled) } -func TestNewQuotaFloodPreventer_IncrementUnderMaxValuesShouldIncrementAndReturnTrue(t *testing.T) { +func TestNewQuotaFloodPreventer_AccumulateUnderMaxValuesShouldIncrementAndReturnTrue(t *testing.T) { t.Parallel() putWasCalled := false @@ -241,16 +241,16 @@ func TestNewQuotaFloodPreventer_IncrementUnderMaxValuesShouldIncrementAndReturnT minTotalSize*10, ) - ok := qfp.Increment("identifier", size) + ok := qfp.Accumulate("identifier", size) assert.True(t, ok) assert.True(t, putWasCalled) } -func TestNewQuotaFloodPreventer_IncrementAddingSumWithResetShouldWork(t *testing.T) { +func TestNewQuotaFloodPreventer_AccumulateGlobalWithResetShouldWork(t *testing.T) { t.Parallel() - putWasCalled := 0 + numPutOperations := 0 addedGlobalQuotaCalled := false existingSize := uint64(0) existingMessages := 
uint32(0) @@ -267,7 +267,7 @@ func TestNewQuotaFloodPreventer_IncrementAddingSumWithResetShouldWork(t *testing }, PutCalled: func(key []byte, value interface{}) (evicted bool) { if string(key) == identifier { - putWasCalled++ + numPutOperations++ } return @@ -294,21 +294,21 @@ func TestNewQuotaFloodPreventer_IncrementAddingSumWithResetShouldWork(t *testing minTotalSize*10, ) - ok := qfp.IncrementAddingToSum(identifier, size) + ok := qfp.AccumulateGlobal(identifier, size) assert.True(t, ok) - ok = qfp.IncrementAddingToSum(identifier, size+1) + ok = qfp.AccumulateGlobal(identifier, size+1) assert.True(t, ok) qfp.Reset() - assert.Equal(t, 2, putWasCalled) + assert.Equal(t, 2, numPutOperations) assert.True(t, addedGlobalQuotaCalled) } -//------- Increment per peer +//------- Accumulate per peer -func TestNewQuotaFloodPreventer_IncrementOverMaxPeerNumMessagesShouldNotPutAndReturnFalse(t *testing.T) { +func TestNewQuotaFloodPreventer_AccumulateOverMaxPeerNumMessagesShouldNotPutAndReturnFalse(t *testing.T) { t.Parallel() existingMessages := uint32(minMessages + 11) @@ -335,12 +335,12 @@ func TestNewQuotaFloodPreventer_IncrementOverMaxPeerNumMessagesShouldNotPutAndRe minTotalSize*10, ) - ok := qfp.Increment("identifier", minTotalSize) + ok := qfp.Accumulate("identifier", minTotalSize) assert.False(t, ok) } -func TestNewQuotaFloodPreventer_IncrementOverMaxPeerSizeShouldNotPutAndReturnFalse(t *testing.T) { +func TestNewQuotaFloodPreventer_AccumulateOverMaxPeerSizeShouldNotPutAndReturnFalse(t *testing.T) { t.Parallel() existingMessages := uint32(minMessages) @@ -367,14 +367,14 @@ func TestNewQuotaFloodPreventer_IncrementOverMaxPeerSizeShouldNotPutAndReturnFal minTotalSize*10, ) - ok := qfp.Increment("identifier", minTotalSize) + ok := qfp.Accumulate("identifier", minTotalSize) assert.False(t, ok) } -//------- Increment globally +//------- Accumulate globally -func TestNewQuotaFloodPreventer_IncrementOverMaxNumMessagesShouldNotPutAndReturnFalse(t *testing.T) { +func TestNewQuotaFloodPreventer_AccumulateOverMaxNumMessagesShouldNotPutAndReturnFalse(t *testing.T) { t.Parallel() globalMessages := uint32(minMessages + 11) @@ -398,12 +398,12 @@ func TestNewQuotaFloodPreventer_IncrementOverMaxNumMessagesShouldNotPutAndReturn ) qfp.SetGlobalQuotaValues(globalMessages, globalSize) - ok := qfp.Increment("identifier", minTotalSize) + ok := qfp.Accumulate("identifier", minTotalSize) assert.False(t, ok) } -func TestNewQuotaFloodPreventer_IncrementOverMaxSizeShouldNotPutAndReturnFalse(t *testing.T) { +func TestNewQuotaFloodPreventer_AccumulateOverMaxSizeShouldNotPutAndReturnFalse(t *testing.T) { t.Parallel() globalMessages := uint32(minMessages) @@ -427,12 +427,12 @@ func TestNewQuotaFloodPreventer_IncrementOverMaxSizeShouldNotPutAndReturnFalse(t ) qfp.SetGlobalQuotaValues(globalMessages, globalSize) - ok := qfp.Increment("identifier", minTotalSize) + ok := qfp.Accumulate("identifier", minTotalSize) assert.False(t, ok) } -func TestCountersMap_IncrementShouldWorkConcurrently(t *testing.T) { +func TestCountersMap_AccumulateShouldWorkConcurrently(t *testing.T) { t.Parallel() numIterations := 1000 @@ -448,7 +448,7 @@ func TestCountersMap_IncrementShouldWorkConcurrently(t *testing.T) { wg.Add(numIterations) for i := 0; i < numIterations; i++ { go func(idx int) { - ok := qfp.Increment(fmt.Sprintf("%d", idx), minTotalSize) + ok := qfp.Accumulate(fmt.Sprintf("%d", idx), minTotalSize) assert.True(t, ok) wg.Done() }(i) @@ -568,7 +568,7 @@ func TestCountersMap_IncrementAndResetShouldWorkConcurrently(t *testing.T) { 
wg.Add(numIterations + numIterations/10) for i := 0; i < numIterations; i++ { go func(idx int) { - ok := qfp.Increment(fmt.Sprintf("%d", idx), minTotalSize) + ok := qfp.Accumulate(fmt.Sprintf("%d", idx), minTotalSize) assert.True(t, ok) wg.Done() }(i) diff --git a/statusHandler/p2pQuota/p2pQuotaProcessor.go b/statusHandler/p2pQuota/p2pQuotaProcessor.go index a05a1e13e4e..a5c6ba326e9 100644 --- a/statusHandler/p2pQuota/p2pQuotaProcessor.go +++ b/statusHandler/p2pQuota/p2pQuotaProcessor.go @@ -15,7 +15,7 @@ type quota struct { sizeProcessedMessages uint64 } -// p2pQuotaProcessor implements process.QuotaStatusHandler and is able to periodically sends to a +// p2pQuotaProcessor implements process.QuotaStatusHandler and is able to periodically send to a // statusHandler the processed p2p quota information type p2pQuotaProcessor struct { mutStatistics sync.Mutex diff --git a/statusHandler/p2pQuota/p2pQuotaProcessor_test.go b/statusHandler/p2pQuota/p2pQuotaProcessor_test.go index ce7f2551867..309dcb06f30 100644 --- a/statusHandler/p2pQuota/p2pQuotaProcessor_test.go +++ b/statusHandler/p2pQuota/p2pQuotaProcessor_test.go @@ -57,11 +57,11 @@ func TestP2pQuotaProcessor_AddQuotaShouldWork(t *testing.T) { func TestP2pQuotaProcessor_ResetStatisticsShouldEmptyStatsAndCallSetOnAllMetrics(t *testing.T) { t.Parallel() - identifier1 := "identifier" - numReceived1 := uint64(1) - sizeReceived1 := uint64(2) - numProcessed1 := uint64(3) - sizeProcessed1 := uint64(4) + identifier := "identifier" + numReceived := uint64(1) + sizeReceived := uint64(2) + numProcessed := uint64(3) + sizeProcessed := uint64(4) numReceivedNetwork := uint64(5) sizeReceivedNetwork := uint64(6) @@ -70,18 +70,18 @@ func TestP2pQuotaProcessor_ResetStatisticsShouldEmptyStatsAndCallSetOnAllMetrics status := mock.NewAppStatusHandlerMock() pqp, _ := p2pQuota.NewP2pQuotaProcessor(status) - pqp.AddQuota(identifier1, uint32(numReceived1), sizeReceived1, uint32(numProcessed1), sizeProcessed1) + pqp.AddQuota(identifier, uint32(numReceived), sizeReceived, uint32(numProcessed), sizeProcessed) pqp.SetGlobalQuota(uint32(numReceivedNetwork), sizeReceivedNetwork, uint32(numProcessedNetwork), sizeProcessedNetwork) pqp.ResetStatistics() - assert.Nil(t, pqp.GetQuota(identifier1)) + assert.Nil(t, pqp.GetQuota(identifier)) numReceivers := uint64(1) checkNetworkMetrics(t, status, numReceivedNetwork, sizeReceivedNetwork, numProcessedNetwork, sizeProcessedNetwork) checkPeakNetworkMetrics(t, status, numReceivedNetwork, sizeReceivedNetwork, numProcessedNetwork, sizeProcessedNetwork) - checkPeerMetrics(t, status, numReceived1, sizeReceived1, numProcessed1, sizeProcessed1) - checkPeakPeerMetrics(t, status, numReceived1, sizeReceived1, numProcessed1, sizeProcessed1) + checkPeerMetrics(t, status, numReceived, sizeReceived, numProcessed, sizeProcessed) + checkPeakPeerMetrics(t, status, numReceived, sizeReceived, numProcessed, sizeProcessed) checkNumReceivers(t, status, numReceivers, numReceivers) }
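
Usage sketch for the renamed interface (illustrative only): the flow below mirrors p2pAntiflood.CanProcessMessage after this rename, with AccumulateGlobal charging the directly connected peer and Accumulate charging the message originator when it differs. The floodPreventer interface is a local copy of the one declared in p2p/p2p.go and process/interface.go; countingFloodPreventer, canProcessMessage and errSystemBusy are hypothetical stand-ins for this sketch, not code from the repository, and the real quotaFloodPreventer additionally tracks message sizes and a global quota.

package main

import (
	"errors"
	"fmt"
	"sync"
)

// errSystemBusy stands in for p2p.ErrSystemBusy.
var errSystemBusy = errors.New("system busy")

// floodPreventer mirrors the renamed FloodPreventer methods used by the antiflood component.
type floodPreventer interface {
	AccumulateGlobal(identifier string, size uint64) bool
	Accumulate(identifier string, size uint64) bool
	Reset()
}

// countingFloodPreventer is a toy per-identifier message counter used only for this sketch.
type countingFloodPreventer struct {
	mut         sync.Mutex
	maxMessages uint32
	counters    map[string]uint32
}

func newCountingFloodPreventer(maxMessages uint32) *countingFloodPreventer {
	return &countingFloodPreventer{
		maxMessages: maxMessages,
		counters:    make(map[string]uint32),
	}
}

// AccumulateGlobal would also update the network-wide quota in the real implementation;
// here it simply falls through to the per-identifier accounting.
func (c *countingFloodPreventer) AccumulateGlobal(identifier string, size uint64) bool {
	return c.Accumulate(identifier, size)
}

// Accumulate registers one message for the identifier and reports whether the quota is still respected.
func (c *countingFloodPreventer) Accumulate(identifier string, _ uint64) bool {
	c.mut.Lock()
	defer c.mut.Unlock()

	c.counters[identifier]++
	return c.counters[identifier] <= c.maxMessages
}

// Reset clears the accumulated counters, as the antiflood component does periodically.
func (c *countingFloodPreventer) Reset() {
	c.mut.Lock()
	defer c.mut.Unlock()

	c.counters = make(map[string]uint32)
}

// canProcessMessage follows the flow of p2pAntiflood.CanProcessMessage after the rename:
// the directly connected peer goes through AccumulateGlobal, the originator (when different)
// goes through Accumulate.
func canProcessMessage(fp floodPreventer, fromConnectedPeer string, originator string, payloadSize uint64) error {
	if !fp.AccumulateGlobal(fromConnectedPeer, payloadSize) {
		return fmt.Errorf("%w for connected peer %s", errSystemBusy, fromConnectedPeer)
	}
	if fromConnectedPeer != originator {
		if !fp.Accumulate(originator, payloadSize) {
			return fmt.Errorf("%w for originator %s", errSystemBusy, originator)
		}
	}

	return nil
}

func main() {
	fp := newCountingFloodPreventer(2)

	for i := 0; i < 4; i++ {
		err := canProcessMessage(fp, "peerA", "peerB", 128)
		fmt.Printf("message %d: err=%v\n", i+1, err)
	}

	fp.Reset()
	fmt.Println("after Reset:", canProcessMessage(fp, "peerA", "peerB", 128))
}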