diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml
index 8c0b06fd55c..8ce200a72f7 100644
--- a/cmd/node/config/config.toml
+++ b/cmd/node/config/config.toml
@@ -373,6 +373,7 @@
 [ProofsPoolConfig]
     CleanupNonceDelta = 3
+    BucketSize = 100
 
 [BadBlocksCache]
     Name = "BadBlocksCache"
diff --git a/config/config.go b/config/config.go
index c77f2f658e9..4712b1ee7b1 100644
--- a/config/config.go
+++ b/config/config.go
@@ -22,6 +22,7 @@ type HeadersPoolConfig struct {
 // ProofsPoolConfig will map the proofs cache configuration
 type ProofsPoolConfig struct {
 	CleanupNonceDelta uint64
+	BucketSize        int
 }
 
 // DBConfig will map the database configuration
diff --git a/dataRetriever/dataPool/proofsCache/export_test.go b/dataRetriever/dataPool/proofsCache/export_test.go
new file mode 100644
index 00000000000..f6b0b007405
--- /dev/null
+++ b/dataRetriever/dataPool/proofsCache/export_test.go
@@ -0,0 +1,34 @@
+package proofscache
+
+import "github.com/multiversx/mx-chain-core-go/data"
+
+// NewProofsCache -
+func NewProofsCache(bucketSize int) *proofsCache {
+	return newProofsCache(bucketSize)
+}
+
+// FullProofsByNonceSize -
+func (pc *proofsCache) FullProofsByNonceSize() int {
+	size := 0
+
+	for _, bucket := range pc.proofsByNonceBuckets {
+		size += bucket.size()
+	}
+
+	return size
+}
+
+// ProofsByHashSize -
+func (pc *proofsCache) ProofsByHashSize() int {
+	return len(pc.proofsByHash)
+}
+
+// AddProof -
+func (pc *proofsCache) AddProof(proof data.HeaderProofHandler) {
+	pc.addProof(proof)
+}
+
+// CleanupProofsBehindNonce -
+func (pc *proofsCache) CleanupProofsBehindNonce(nonce uint64) {
+	pc.cleanupProofsBehindNonce(nonce)
+}
diff --git a/dataRetriever/dataPool/proofsCache/proofsBucket.go b/dataRetriever/dataPool/proofsCache/proofsBucket.go
new file mode 100644
index 00000000000..91b5815f440
--- /dev/null
+++ b/dataRetriever/dataPool/proofsCache/proofsBucket.go
@@ -0,0 +1,26 @@
+package proofscache
+
+import "github.com/multiversx/mx-chain-core-go/data"
+
+type proofNonceBucket struct {
+	maxNonce      uint64
+	proofsByNonce map[uint64]string
+}
+
+func newProofBucket() *proofNonceBucket {
+	return &proofNonceBucket{
+		proofsByNonce: make(map[uint64]string),
+	}
+}
+
+func (p *proofNonceBucket) size() int {
+	return len(p.proofsByNonce)
+}
+
+func (p *proofNonceBucket) insert(proof data.HeaderProofHandler) {
+	p.proofsByNonce[proof.GetHeaderNonce()] = string(proof.GetHeaderHash())
+
+	if proof.GetHeaderNonce() > p.maxNonce {
+		p.maxNonce = proof.GetHeaderNonce()
+	}
+}
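A note on the bucket layout above: each bucket keeps only a nonce-to-hash map plus the highest nonce it has seen, so deciding whether an entire bucket is evictable costs a single comparison against maxNonce instead of a scan over entries. A minimal standalone sketch of that bookkeeping (demoBucket is a hypothetical mirror of proofNonceBucket, not part of the patch):

package main

import "fmt"

// demoBucket mirrors proofNonceBucket's bookkeeping: a nonce->hash map plus
// the highest nonce inserted so far.
type demoBucket struct {
	maxNonce      uint64
	proofsByNonce map[uint64]string
}

func (b *demoBucket) insert(nonce uint64, hash string) {
	b.proofsByNonce[nonce] = hash
	if nonce > b.maxNonce {
		b.maxNonce = nonce
	}
}

func main() {
	b := &demoBucket{proofsByNonce: map[uint64]string{}}
	b.insert(7, "h7")
	b.insert(42, "h42")
	b.insert(13, "h13")

	// Evictability below a cleanup nonce is a single comparison:
	fmt.Println(b.maxNonce, 100 > b.maxNonce) // 42 true
}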
diff --git a/dataRetriever/dataPool/proofsCache/proofsCache.go b/dataRetriever/dataPool/proofsCache/proofsCache.go
index e0cab35dd99..846bc101b13 100644
--- a/dataRetriever/dataPool/proofsCache/proofsCache.go
+++ b/dataRetriever/dataPool/proofsCache/proofsCache.go
@@ -1,29 +1,24 @@
 package proofscache
 
 import (
-	"sort"
 	"sync"
 
 	"github.com/multiversx/mx-chain-core-go/core/check"
 	"github.com/multiversx/mx-chain-core-go/data"
 )
 
-type proofNonceMapping struct {
-	headerHash string
-	nonce      uint64
-}
-
 type proofsCache struct {
-	mutProofsCache sync.RWMutex
-	proofsByNonce  []*proofNonceMapping
-	proofsByHash   map[string]data.HeaderProofHandler
+	mutProofsCache       sync.RWMutex
+	proofsByNonceBuckets map[uint64]*proofNonceBucket
+	bucketSize           uint64
+	proofsByHash         map[string]data.HeaderProofHandler
 }
 
-func newProofsCache() *proofsCache {
+func newProofsCache(bucketSize int) *proofsCache {
 	return &proofsCache{
-		mutProofsCache: sync.RWMutex{},
-		proofsByNonce:  make([]*proofNonceMapping, 0),
-		proofsByHash:   make(map[string]data.HeaderProofHandler),
+		proofsByNonceBuckets: make(map[uint64]*proofNonceBucket),
+		bucketSize:           uint64(bucketSize),
+		proofsByHash:         make(map[string]data.HeaderProofHandler),
 	}
 }
 
@@ -47,18 +42,28 @@ func (pc *proofsCache) addProof(proof data.HeaderProofHandler) {
 	pc.mutProofsCache.Lock()
 	defer pc.mutProofsCache.Unlock()
 
-	pc.proofsByNonce = append(pc.proofsByNonce, &proofNonceMapping{
-		headerHash: string(proof.GetHeaderHash()),
-		nonce:      proof.GetHeaderNonce(),
-	})
-
-	sort.Slice(pc.proofsByNonce, func(i, j int) bool {
-		return pc.proofsByNonce[i].nonce < pc.proofsByNonce[j].nonce
-	})
+	pc.insertProofByNonce(proof)
 
 	pc.proofsByHash[string(proof.GetHeaderHash())] = proof
 }
 
+// getBucketKey returns the key of the bucket covering the given nonce, computed as the lower bound of that bucket's nonce window
+func (pc *proofsCache) getBucketKey(index uint64) uint64 {
+	return (index / pc.bucketSize) * pc.bucketSize
+}
+
+func (pc *proofsCache) insertProofByNonce(proof data.HeaderProofHandler) {
+	bucketKey := pc.getBucketKey(proof.GetHeaderNonce())
+
+	bucket, ok := pc.proofsByNonceBuckets[bucketKey]
+	if !ok {
+		bucket = newProofBucket()
+		pc.proofsByNonceBuckets[bucketKey] = bucket
+	}
+
+	bucket.insert(proof)
+}
+
 func (pc *proofsCache) cleanupProofsBehindNonce(nonce uint64) {
 	if nonce == 0 {
 		return
@@ -67,16 +72,16 @@ func (pc *proofsCache) cleanupProofsBehindNonce(nonce uint64) {
 	pc.mutProofsCache.Lock()
 	defer pc.mutProofsCache.Unlock()
 
-	proofsByNonce := make([]*proofNonceMapping, 0)
-
-	for _, proofInfo := range pc.proofsByNonce {
-		if proofInfo.nonce < nonce {
-			delete(pc.proofsByHash, proofInfo.headerHash)
-			continue
+	for key, bucket := range pc.proofsByNonceBuckets {
+		if nonce > bucket.maxNonce {
+			pc.cleanupProofsInBucket(bucket)
+			delete(pc.proofsByNonceBuckets, key)
 		}
-
-		proofsByNonce = append(proofsByNonce, proofInfo)
 	}
+}
 
-	pc.proofsByNonce = proofsByNonce
+func (pc *proofsCache) cleanupProofsInBucket(bucket *proofNonceBucket) {
+	for _, headerHash := range bucket.proofsByNonce {
+		delete(pc.proofsByHash, headerHash)
+	}
 }
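This replaces the previous append-then-sort.Slice approach, so each insert is O(1) map work and cleanup is proportional to the number of buckets rather than the number of proofs. The trade-off is granularity: cleanupProofsBehindNonce drops only whole buckets whose maxNonce is behind the cleanup nonce, so a proof older than the cleanup nonce can survive if a newer proof shares its bucket. A sketch of a test-package example built on the export_test.go helpers above (illustrative only, not part of the patch):

package proofscache_test

import (
	"fmt"

	"github.com/multiversx/mx-chain-core-go/data/block"
	proofscache "github.com/multiversx/mx-chain-go/dataRetriever/dataPool/proofsCache"
)

func Example_cleanupGranularity() {
	pc := proofscache.NewProofsCache(100)

	pc.AddProof(&block.HeaderProof{HeaderHash: []byte("h50"), HeaderNonce: 50})   // bucket [0, 100)
	pc.AddProof(&block.HeaderProof{HeaderHash: []byte("h120"), HeaderNonce: 120}) // bucket [100, 200)
	pc.AddProof(&block.HeaderProof{HeaderHash: []byte("h180"), HeaderNonce: 180}) // bucket [100, 200)

	// Bucket [0, 100) has maxNonce 50, which is behind 150, so it is dropped whole.
	// Bucket [100, 200) has maxNonce 180 >= 150, so it is kept whole: nonce 120
	// survives even though it is behind the cleanup nonce.
	pc.CleanupProofsBehindNonce(150)

	fmt.Println(pc.ProofsByHashSize())
	// Output: 2
}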
diff --git a/dataRetriever/dataPool/proofsCache/proofsCache_bench_test.go b/dataRetriever/dataPool/proofsCache/proofsCache_bench_test.go
new file mode 100644
index 00000000000..910895b099c
--- /dev/null
+++ b/dataRetriever/dataPool/proofsCache/proofsCache_bench_test.go
@@ -0,0 +1,66 @@
+package proofscache_test
+
+import (
+	"fmt"
+	"testing"
+
+	proofscache "github.com/multiversx/mx-chain-go/dataRetriever/dataPool/proofsCache"
+)
+
+func Benchmark_AddProof_Bucket10_Pool1000(b *testing.B) {
+	benchmarkAddProof(b, 10, 1000)
+}
+
+func Benchmark_AddProof_Bucket100_Pool10000(b *testing.B) {
+	benchmarkAddProof(b, 100, 10000)
+}
+
+func Benchmark_AddProof_Bucket1000_Pool100000(b *testing.B) {
+	benchmarkAddProof(b, 1000, 100000)
+}
+
+func benchmarkAddProof(b *testing.B, bucketSize int, nonceRange int) {
+	pc := proofscache.NewProofsCache(bucketSize)
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		b.StopTimer()
+		proof := generateProof()
+		nonce := generateRandomNonce(int64(nonceRange))
+
+		proof.HeaderNonce = nonce
+		proof.HeaderHash = []byte(fmt.Sprintf("hash_%d", nonce))
+		b.StartTimer()
+
+		pc.AddProof(proof)
+	}
+}
+
+func Benchmark_CleanupProofs_Bucket10_Pool1000(b *testing.B) {
+	benchmarkCleanupProofs(b, 10, 1000)
+}
+
+func Benchmark_CleanupProofs_Bucket100_Pool10000(b *testing.B) {
+	benchmarkCleanupProofs(b, 100, 10000)
+}
+
+func Benchmark_CleanupProofs_Bucket1000_Pool100000(b *testing.B) {
+	benchmarkCleanupProofs(b, 1000, 100000)
+}
+
+func benchmarkCleanupProofs(b *testing.B, bucketSize int, nonceRange int) {
+	pc := proofscache.NewProofsCache(bucketSize)
+
+	for i := uint64(0); i < uint64(nonceRange); i++ {
+		proof := generateProof()
+		proof.HeaderNonce = i
+		proof.HeaderHash = []byte(fmt.Sprintf("hash_%d", i))
+
+		pc.AddProof(proof)
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		pc.CleanupProofsBehindNonce(uint64(nonceRange))
+	}
+}
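Assuming a standard Go toolchain, the benchmarks above can be run in isolation with:

go test -run '^$' -bench 'Benchmark_(AddProof|CleanupProofs)' ./dataRetriever/dataPool/proofsCache/

Note that benchmarkAddProof brackets proof generation with StopTimer/StartTimer, so the reported ns/op measures only the insert path, not the random nonce and hash setup.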
diff --git a/dataRetriever/dataPool/proofsCache/proofsCache_test.go b/dataRetriever/dataPool/proofsCache/proofsCache_test.go
new file mode 100644
index 00000000000..84bc70d9104
--- /dev/null
+++ b/dataRetriever/dataPool/proofsCache/proofsCache_test.go
@@ -0,0 +1,152 @@
+package proofscache_test
+
+import (
+	"fmt"
+	"math/rand"
+	"sync"
+	"testing"
+
+	"github.com/multiversx/mx-chain-core-go/data/block"
+	proofscache "github.com/multiversx/mx-chain-go/dataRetriever/dataPool/proofsCache"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestProofsCache(t *testing.T) {
+	t.Parallel()
+
+	t.Run("incremental nonces, should cleanup all caches", func(t *testing.T) {
+		t.Parallel()
+
+		proof0 := &block.HeaderProof{HeaderHash: []byte{0}, HeaderNonce: 0}
+		proof1 := &block.HeaderProof{HeaderHash: []byte{1}, HeaderNonce: 1}
+		proof2 := &block.HeaderProof{HeaderHash: []byte{2}, HeaderNonce: 2}
+		proof3 := &block.HeaderProof{HeaderHash: []byte{3}, HeaderNonce: 3}
+		proof4 := &block.HeaderProof{HeaderHash: []byte{4}, HeaderNonce: 4}
+
+		pc := proofscache.NewProofsCache(4)
+
+		pc.AddProof(proof0)
+		pc.AddProof(proof1)
+		pc.AddProof(proof2)
+		pc.AddProof(proof3)
+
+		require.Equal(t, 4, pc.FullProofsByNonceSize())
+		require.Equal(t, 4, pc.ProofsByHashSize())
+
+		pc.AddProof(proof4) // added to new head bucket
+
+		require.Equal(t, 5, pc.ProofsByHashSize())
+
+		pc.CleanupProofsBehindNonce(4)
+		require.Equal(t, 1, pc.ProofsByHashSize())
+
+		pc.CleanupProofsBehindNonce(10)
+		require.Equal(t, 0, pc.ProofsByHashSize())
+	})
+
+	t.Run("non incremental nonces", func(t *testing.T) {
+		t.Parallel()
+
+		proof0 := &block.HeaderProof{HeaderHash: []byte{0}, HeaderNonce: 0}
+		proof1 := &block.HeaderProof{HeaderHash: []byte{1}, HeaderNonce: 1}
+		proof2 := &block.HeaderProof{HeaderHash: []byte{2}, HeaderNonce: 2}
+		proof3 := &block.HeaderProof{HeaderHash: []byte{3}, HeaderNonce: 3}
+		proof4 := &block.HeaderProof{HeaderHash: []byte{4}, HeaderNonce: 4}
+		proof5 := &block.HeaderProof{HeaderHash: []byte{5}, HeaderNonce: 5}
+
+		pc := proofscache.NewProofsCache(4)
+
+		pc.AddProof(proof4)
+		pc.AddProof(proof1)
+		pc.AddProof(proof2)
+		pc.AddProof(proof3)
+
+		require.Equal(t, 4, pc.FullProofsByNonceSize())
+		require.Equal(t, 4, pc.ProofsByHashSize())
+
+		pc.AddProof(proof0) // added to the existing lowest-nonce bucket
+
+		require.Equal(t, 5, pc.FullProofsByNonceSize())
+		require.Equal(t, 5, pc.ProofsByHashSize())
+
+		pc.CleanupProofsBehindNonce(4)
+
+		// cleaned up everything but the head bucket, which holds a single proof
+		require.Equal(t, 1, pc.ProofsByHashSize())
+
+		pc.AddProof(proof5) // added to the existing head bucket
+
+		require.Equal(t, 2, pc.ProofsByHashSize())
+
+		pc.CleanupProofsBehindNonce(5) // will not remove any bucket
+		require.Equal(t, 2, pc.FullProofsByNonceSize())
+		require.Equal(t, 2, pc.ProofsByHashSize())
+
+		pc.CleanupProofsBehindNonce(10)
+		require.Equal(t, 0, pc.ProofsByHashSize())
+	})
+
+	t.Run("shuffled nonces, should cleanup all caches", func(t *testing.T) {
+		t.Parallel()
+
+		pc := proofscache.NewProofsCache(10)
+
+		nonces := generateShuffledNonces(100)
+		for _, nonce := range nonces {
+			proof := generateProof()
+			proof.HeaderNonce = nonce
+			proof.HeaderHash = []byte(fmt.Sprintf("hash_%d", nonce))
+
+			pc.AddProof(proof)
+		}
+
+		require.Equal(t, 100, pc.FullProofsByNonceSize())
+		require.Equal(t, 100, pc.ProofsByHashSize())
+
+		pc.CleanupProofsBehindNonce(100)
+		require.Equal(t, 0, pc.FullProofsByNonceSize())
+		require.Equal(t, 0, pc.ProofsByHashSize())
+	})
+}
+
+func TestProofsCache_Concurrency(t *testing.T) {
+	t.Parallel()
+
+	pc := proofscache.NewProofsCache(100)
+
+	numOperations := 1000
+
+	wg := sync.WaitGroup{}
+	wg.Add(numOperations)
+
+	for i := 0; i < numOperations; i++ {
+		go func(idx int) {
+			switch idx % 2 {
+			case 0:
+				pc.AddProof(generateProof())
+			case 1:
+				pc.CleanupProofsBehindNonce(generateRandomNonce(100))
+			default:
+				assert.Fail(t, "should not have been called")
+			}
+
+			wg.Done()
+		}(i)
+	}
+
+	wg.Wait()
+}
+
+func generateShuffledNonces(n int) []uint64 {
+	nonces := make([]uint64, n)
+	for i := uint64(0); i < uint64(n); i++ {
+		nonces[i] = i
+	}
+
+	rand.Shuffle(len(nonces), func(i, j int) {
+		nonces[i], nonces[j] = nonces[j], nonces[i]
+	})
+
+	return nonces
+}
diff --git a/dataRetriever/dataPool/proofsCache/proofsPool.go b/dataRetriever/dataPool/proofsCache/proofsPool.go
index fd91345f584..c53e6d70b9c 100644
--- a/dataRetriever/dataPool/proofsCache/proofsPool.go
+++ b/dataRetriever/dataPool/proofsCache/proofsPool.go
@@ -10,6 +10,7 @@ import (
 )
 
 const defaultCleanupNonceDelta = 3
+const defaultBucketSize = 100
 
 var log = logger.GetOrCreate("dataRetriever/proofscache")
 
@@ -20,19 +21,25 @@ type proofsPool struct {
 	mutAddedProofSubscribers sync.RWMutex
 	addedProofSubscribers    []func(headerProof data.HeaderProofHandler)
 	cleanupNonceDelta        uint64
+	bucketSize               int
 }
 
 // NewProofsPool creates a new proofs pool component
-func NewProofsPool(cleanupNonceDelta uint64) *proofsPool {
+func NewProofsPool(cleanupNonceDelta uint64, bucketSize int) *proofsPool {
 	if cleanupNonceDelta < defaultCleanupNonceDelta {
 		log.Debug("proofs pool: using default cleanup nonce delta", "cleanupNonceDelta", defaultCleanupNonceDelta)
 		cleanupNonceDelta = defaultCleanupNonceDelta
 	}
+	if bucketSize < defaultBucketSize {
+		log.Debug("proofs pool: using default bucket size", "bucketSize", defaultBucketSize)
+		bucketSize = defaultBucketSize
+	}
 
 	return &proofsPool{
 		cache:                 make(map[uint32]*proofsCache),
 		addedProofSubscribers: make([]func(headerProof data.HeaderProofHandler), 0),
 		cleanupNonceDelta:     cleanupNonceDelta,
+		bucketSize:            bucketSize,
 	}
 }
 
@@ -55,7 +62,7 @@ func (pp *proofsPool) AddProof(
 	pp.mutCache.Lock()
 	proofsPerShard, ok := pp.cache[shardID]
 	if !ok {
-		proofsPerShard = newProofsCache()
+		proofsPerShard = newProofsCache(pp.bucketSize)
 		pp.cache[shardID] = proofsPerShard
 	}
 	pp.mutCache.Unlock()
diff --git a/dataRetriever/dataPool/proofsCache/proofsPool_test.go b/dataRetriever/dataPool/proofsCache/proofsPool_test.go
index 14b25f63541..92be9475725 100644
--- a/dataRetriever/dataPool/proofsCache/proofsPool_test.go
+++ b/dataRetriever/dataPool/proofsCache/proofsPool_test.go
@@ -16,6 +16,7 @@ import (
 )
 
 const cleanupDelta = 3
+const bucketSize = 100
 
 var shardID = uint32(1)
 
@@ -56,14 +57,14 @@ var proof4 = &block.HeaderProof{
 func TestNewProofsPool(t *testing.T) {
 	t.Parallel()
 
-	pp := proofscache.NewProofsPool(cleanupDelta)
+	pp := proofscache.NewProofsPool(cleanupDelta, bucketSize)
 	require.False(t, pp.IsInterfaceNil())
 }
 
 func TestProofsPool_ShouldWork(t *testing.T) {
 	t.Parallel()
 
-	pp := proofscache.NewProofsPool(cleanupDelta)
+	pp := proofscache.NewProofsPool(cleanupDelta, bucketSize)
 
 	_ = pp.AddProof(proof1)
 	_ = pp.AddProof(proof2)
@@ -92,7 +93,7 @@ func TestProofsPool_ShouldWork(t *testing.T) {
 func TestProofsPool_RegisterHandler(t *testing.T) {
 	t.Parallel()
 
-	pp := proofscache.NewProofsPool(cleanupDelta)
+	pp := proofscache.NewProofsPool(cleanupDelta, bucketSize)
 
 	wasCalled := false
 	wg := sync.WaitGroup{}
@@ -117,21 +118,14 @@ func TestProofsPool_CleanupProofsBehindNonce(t *testing.T) {
 	t.Run("should not cleanup proofs behind delta", func(t *testing.T) {
 		t.Parallel()
 
-		pp := proofscache.NewProofsPool(cleanupDelta)
+		pp := proofscache.NewProofsPool(cleanupDelta, bucketSize)
 
 		_ = pp.AddProof(proof1)
 		_ = pp.AddProof(proof2)
 		_ = pp.AddProof(proof3)
 		_ = pp.AddProof(proof4)
 
-		err := pp.CleanupProofsBehindNonce(shardID, 5)
-		require.Nil(t, err)
-
-		proof, err := pp.GetProof(shardID, []byte("hash1"))
-		require.Equal(t, proofscache.ErrMissingProof, err)
-		require.Nil(t, proof)
-
-		_, err = pp.GetProof(shardID, []byte("hash2"))
+		_, err := pp.GetProof(shardID, []byte("hash2"))
 		require.Nil(t, err)
 		_, err = pp.GetProof(shardID, []byte("hash3"))
 		require.Nil(t, err)
@@ -142,7 +136,7 @@ func TestProofsPool_CleanupProofsBehindNonce(t *testing.T) {
 	t.Run("should not cleanup if nonce smaller or equal to delta", func(t *testing.T) {
 		t.Parallel()
 
-		pp := proofscache.NewProofsPool(cleanupDelta)
+		pp := proofscache.NewProofsPool(cleanupDelta, bucketSize)
 
 		_ = pp.AddProof(proof1)
 		_ = pp.AddProof(proof2)
@@ -166,7 +160,7 @@ func TestProofsPool_CleanupProofsBehindNonce(t *testing.T) {
 func TestProofsPool_Concurrency(t *testing.T) {
 	t.Parallel()
 
-	pp := proofscache.NewProofsPool(cleanupDelta)
+	pp := proofscache.NewProofsPool(cleanupDelta, bucketSize)
 
 	numOperations := 1000
 
@@ -186,7 +180,7 @@ func TestProofsPool_Concurrency(t *testing.T) {
 				atomic.AddUint32(&cnt, 1)
 			}
 		case 4:
-			_ = pp.CleanupProofsBehindNonce(generateRandomShardID(), generateRandomNonce())
+			_ = pp.CleanupProofsBehindNonce(generateRandomShardID(), generateRandomNonce(100))
 		case 5:
 			handler := func(proof data.HeaderProofHandler) {
 			}
@@ -199,6 +193,8 @@ func TestProofsPool_Concurrency(t *testing.T) {
 		}(i)
 	}
 
+	wg.Wait()
+
 	require.GreaterOrEqual(t, uint32(numOperations/3), atomic.LoadUint32(&cnt))
 }
 
@@ -206,7 +202,7 @@ func generateProof() *block.HeaderProof {
 	return &block.HeaderProof{
 		HeaderHash:    generateRandomHash(),
 		HeaderEpoch:   1,
-		HeaderNonce:   generateRandomNonce(),
+		HeaderNonce:   generateRandomNonce(100),
 		HeaderShardId: generateRandomShardID(),
 	}
 }
@@ -217,8 +213,8 @@ func generateRandomHash() []byte {
 	return hash
 }
 
-func generateRandomNonce() uint64 {
-	val := generateRandomInt(3)
+func generateRandomNonce(n int64) uint64 {
+	val := generateRandomInt(n)
 	return val.Uint64()
 }
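As a usage note on the constructor: both arguments are clamped to the package defaults, so zero or misconfigured values degrade gracefully rather than producing degenerate buckets. A minimal sketch, assuming this repository's import path:

package main

import (
	proofscache "github.com/multiversx/mx-chain-go/dataRetriever/dataPool/proofsCache"
)

func main() {
	// Config-provided values at or above the defaults pass through unchanged.
	_ = proofscache.NewProofsPool(5, 200)

	// Values below defaultCleanupNonceDelta (3) or defaultBucketSize (100)
	// are replaced with the defaults, each emitting a debug log line.
	_ = proofscache.NewProofsPool(0, 10)
}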
diff --git a/dataRetriever/factory/dataPoolFactory.go b/dataRetriever/factory/dataPoolFactory.go
index e869b249fdc..e375ed2c785 100644
--- a/dataRetriever/factory/dataPoolFactory.go
+++ b/dataRetriever/factory/dataPoolFactory.go
@@ -151,7 +151,7 @@ func NewDataPoolFromConfig(args ArgsDataPool) (dataRetriever.PoolsHolder, error)
 		return nil, fmt.Errorf("%w while creating the cache for the validator info results", err)
 	}
 
-	proofsPool := proofscache.NewProofsPool(mainConfig.ProofsPoolConfig.CleanupNonceDelta)
+	proofsPool := proofscache.NewProofsPool(mainConfig.ProofsPoolConfig.CleanupNonceDelta, mainConfig.ProofsPoolConfig.BucketSize)
 
 	currBlockTransactions := dataPool.NewCurrentBlockTransactionsPool()
 	currEpochValidatorInfo := dataPool.NewCurrentEpochValidatorInfoPool()
diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go
index ccaf283e44a..b5de990bc2f 100644
--- a/integrationTests/testProcessorNode.go
+++ b/integrationTests/testProcessorNode.go
@@ -1088,7 +1088,7 @@ func (tpn *TestProcessorNode) InitializeProcessors(gasMap map[string]map[string]
 }
 
 func (tpn *TestProcessorNode) initDataPools() {
-	tpn.ProofsPool = proofscache.NewProofsPool(3)
+	tpn.ProofsPool = proofscache.NewProofsPool(3, 100)
 	tpn.DataPool = dataRetrieverMock.CreatePoolsHolderWithProofsPool(1, tpn.ShardCoordinator.SelfId(), tpn.ProofsPool)
 	cacherCfg := storageunit.CacheConfig{Capacity: 10000, Type: storageunit.LRUCache, Shards: 1}
 	suCache, _ := storageunit.NewCache(cacherCfg)
diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go
index c39d8c61d76..f46bd6577a2 100644
--- a/process/block/baseProcess_test.go
+++ b/process/block/baseProcess_test.go
@@ -292,7 +292,7 @@ func initDataPool(testHash []byte) *dataRetrieverMock.PoolsHolderStub {
 			return cs
 		},
 		ProofsCalled: func() dataRetriever.ProofsPool {
-			return proofscache.NewProofsPool(3)
+			return proofscache.NewProofsPool(3, 100)
 		},
 	}
 
diff --git a/testscommon/dataRetriever/poolFactory.go b/testscommon/dataRetriever/poolFactory.go
index 5f151a2f73b..bff8b653001 100644
--- a/testscommon/dataRetriever/poolFactory.go
+++ b/testscommon/dataRetriever/poolFactory.go
@@ -135,7 +135,7 @@ func createPoolHolderArgs(numShards uint32, selfShard uint32) dataPool.DataPoolA
 	})
 	panicIfError("CreatePoolsHolder", err)
 
-	proofsPool := proofscache.NewProofsPool(3)
+	proofsPool := proofscache.NewProofsPool(3, 100)
 
 	currentBlockTransactions := dataPool.NewCurrentBlockTransactionsPool()
 	currentEpochValidatorInfo := dataPool.NewCurrentEpochValidatorInfoPool()
@@ -245,7 +245,7 @@ func CreatePoolsHolderWithTxPool(txPool dataRetriever.ShardedDataCacherNotifier)
 	heartbeatPool, err := storageunit.NewCache(cacherConfig)
 	panicIfError("CreatePoolsHolderWithTxPool", err)
 
-	proofsPool := proofscache.NewProofsPool(3)
+	proofsPool := proofscache.NewProofsPool(3, 100)
 
 	currentBlockTransactions := dataPool.NewCurrentBlockTransactionsPool()
 	currentEpochValidatorInfo := dataPool.NewCurrentEpochValidatorInfoPool()
diff --git a/testscommon/dataRetriever/poolsHolderMock.go b/testscommon/dataRetriever/poolsHolderMock.go
index 0911bf44f93..6dc5266c062 100644
--- a/testscommon/dataRetriever/poolsHolderMock.go
+++ b/testscommon/dataRetriever/poolsHolderMock.go
@@ -110,7 +110,7 @@ func NewPoolsHolderMock() *PoolsHolderMock {
 	})
 	panicIfError("NewPoolsHolderMock", err)
 
-	holder.proofs = proofscache.NewProofsPool(3)
+	holder.proofs = proofscache.NewProofsPool(3, 100)
 
 	return holder
 }
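To tie the configuration pieces together: the new BucketSize value travels from the [ProofsPoolConfig] section of config.toml through config.ProofsPoolConfig into NewProofsPool at the data pool factory call site. A standalone mirror of that flow (the struct is copied here purely for illustration; the node itself decodes config.toml through its own config loader):

package main

import "fmt"

// ProofsPoolConfig mirrors the struct from config/config.go.
type ProofsPoolConfig struct {
	CleanupNonceDelta uint64
	BucketSize        int
}

func main() {
	// Values as they would arrive from the [ProofsPoolConfig] section of config.toml.
	cfg := ProofsPoolConfig{
		CleanupNonceDelta: 3,
		BucketSize:        100,
	}

	// Mirrors the call site in dataRetriever/factory/dataPoolFactory.go:
	//   proofscache.NewProofsPool(cfg.CleanupNonceDelta, cfg.BucketSize)
	fmt.Printf("cleanupNonceDelta=%d bucketSize=%d\n", cfg.CleanupNonceDelta, cfg.BucketSize)
}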