Merged (changes from 12 commits)
1 change: 1 addition & 0 deletions cmd/node/config/config.toml
@@ -373,6 +373,7 @@

[ProofsPoolConfig]
CleanupNonceDelta = 3
BucketSize = 100

[BadBlocksCache]
Name = "BadBlocksCache"
1 change: 1 addition & 0 deletions config/config.go
@@ -22,6 +22,7 @@ type HeadersPoolConfig struct {
// ProofsPoolConfig will map the proofs cache configuration
type ProofsPoolConfig struct {
CleanupNonceDelta uint64
BucketSize int
}

// DBConfig will map the database configuration
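The new BucketSize value travels from the TOML entry above into this struct. A minimal, hypothetical wiring sketch (the actual factory code is not part of this diff; the values mirror cmd/node/config/config.toml, and the field would ultimately be handed to newProofsCache, shown later in this PR):

proofsPoolCfg := config.ProofsPoolConfig{
	CleanupNonceDelta: 3,
	BucketSize:        100,
}
// ... the pool factory would eventually call newProofsCache(proofsPoolCfg.BucketSize)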
43 changes: 43 additions & 0 deletions dataRetriever/dataPool/proofsCache/export_test.go
@@ -0,0 +1,43 @@
package proofscache

import "github.com/multiversx/mx-chain-core-go/data"

// NewProofsCache -
func NewProofsCache(bucketSize int) *proofsCache {
return newProofsCache(bucketSize)
}

// HeadBucketSize -
func (pc *proofsCache) HeadBucketSize() int {
if len(pc.proofsByNonceBuckets) > 0 {
return len(pc.proofsByNonceBuckets[0].proofsByNonce)
}

return 0
}

// FullProofsByNonceSize -
func (pc *proofsCache) FullProofsByNonceSize() int {
size := 0

for _, bucket := range pc.proofsByNonceBuckets {
size += bucket.size()
}

return size
}

// ProofsByHashSize -
func (pc *proofsCache) ProofsByHashSize() int {
return len(pc.proofsByHash)
}

// AddProof -
func (pc *proofsCache) AddProof(proof data.HeaderProofHandler) {
pc.addProof(proof)
}

// CleanupProofsBehindNonce -
func (pc *proofsCache) CleanupProofsBehindNonce(nonce uint64) {
pc.cleanupProofsBehindNonce(nonce)
}
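These wrappers only exist so the proofscache_test package can reach the unexported cache. A short sketch of the kind of assertion they enable (a hypothetical test, reusing the generateProof helper already defined in the package's tests):

func TestHeadBucketFillsBelowBucketSize(t *testing.T) {
	pc := proofscache.NewProofsCache(4)

	for i := uint64(0); i < 3; i++ {
		proof := generateProof()
		proof.HeaderNonce = i
		proof.HeaderHash = []byte(fmt.Sprintf("hash_%d", i))
		pc.AddProof(proof)
	}

	// bucket size is 4, so all three proofs stay in the head bucket
	require.Equal(t, 3, pc.HeadBucketSize())
	require.Equal(t, 3, pc.ProofsByHashSize())
}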
35 changes: 35 additions & 0 deletions dataRetriever/dataPool/proofsCache/proofsBucket.go
@@ -0,0 +1,35 @@
package proofscache

import "github.com/multiversx/mx-chain-core-go/data"

type proofNonceBucket struct {
maxNonce uint64
proofsByNonce []*proofNonceMapping
Contributor: Maybe use a map, then the upsert would not need to iterate.
Contributor Author: Changed to use a map.

bucketSize int
}

func newProofBucket(bucketSize int) *proofNonceBucket {
return &proofNonceBucket{
proofsByNonce: make([]*proofNonceMapping, 0),
bucketSize: bucketSize,
}
}

func (p *proofNonceBucket) size() int {
return len(p.proofsByNonce)
}

func (p *proofNonceBucket) isFull() bool {
Contributor: I would use fixed size and deterministic range for each bucket, e.g. a proof with nonce x should be added into the bucket whose first nonce is x/bucketSize * bucketSize (integer division).
Contributor Author: Changed to use fixed-size buckets.

return len(p.proofsByNonce) >= p.bucketSize
}

func (p *proofNonceBucket) insert(proof data.HeaderProofHandler) {
p.proofsByNonce = append(p.proofsByNonce, &proofNonceMapping{
headerHash: string(proof.GetHeaderHash()),
nonce: proof.GetHeaderNonce(),
})

if proof.GetHeaderNonce() > p.maxNonce {
p.maxNonce = proof.GetHeaderNonce()
}
}
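The fixed-range scheme suggested above amounts to deriving a deterministic bucket key from the nonce itself, so each proof lands in a bucket determined only by its nonce, independent of insertion order. A minimal sketch of that computation (the reviewer's formula, not the code in this revision, which appends to the head bucket until it is full):

// nonces 0..bucketSize-1 map to key 0, bucketSize..2*bucketSize-1 to key bucketSize, and so on
func bucketKey(nonce uint64, bucketSize uint64) uint64 {
	return (nonce / bucketSize) * bucketSize
}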
66 changes: 44 additions & 22 deletions dataRetriever/dataPool/proofsCache/proofsCache.go
@@ -1,7 +1,6 @@
package proofscache

import (
"sort"
"sync"

"github.com/multiversx/mx-chain-core-go/data"
@@ -13,16 +12,17 @@ type proofNonceMapping struct {
}

type proofsCache struct {
mutProofsCache sync.RWMutex
proofsByNonce []*proofNonceMapping
proofsByHash map[string]data.HeaderProofHandler
mutProofsCache sync.RWMutex
proofsByNonceBuckets []*proofNonceBucket
Contributor: I think this can be a map as well, with the key being the lowest nonce that the bucket would hold.
Contributor Author: Changed to use sync.Map.

bucketSize int
proofsByHash map[string]data.HeaderProofHandler
}

func newProofsCache() *proofsCache {
func newProofsCache(bucketSize int) *proofsCache {
return &proofsCache{
mutProofsCache: sync.RWMutex{},
proofsByNonce: make([]*proofNonceMapping, 0),
proofsByHash: make(map[string]data.HeaderProofHandler),
proofsByNonceBuckets: make([]*proofNonceBucket, 0),
bucketSize: bucketSize,
proofsByHash: make(map[string]data.HeaderProofHandler),
}
}

@@ -46,18 +46,34 @@ func (pc *proofsCache) addProof(proof data.HeaderProofHandler) {
pc.mutProofsCache.Lock()
defer pc.mutProofsCache.Unlock()

pc.proofsByNonce = append(pc.proofsByNonce, &proofNonceMapping{
headerHash: string(proof.GetHeaderHash()),
nonce: proof.GetHeaderNonce(),
})

sort.Slice(pc.proofsByNonce, func(i, j int) bool {
return pc.proofsByNonce[i].nonce < pc.proofsByNonce[j].nonce
})
pc.insertProofByNonce(proof)

pc.proofsByHash[string(proof.GetHeaderHash())] = proof
}

func (pc *proofsCache) insertProofByNonce(proof data.HeaderProofHandler) {
if len(pc.proofsByNonceBuckets) == 0 {
pc.insertInNewBucket(proof)
return
}

headBucket := pc.proofsByNonceBuckets[0]

if headBucket.isFull() {
Contributor: In the case of a syncing node, this will cause it to keep accumulating new nonces in the old buckets, which will delay their cleanup.
Contributor Author: The new model with range buckets should cover this case.

pc.insertInNewBucket(proof)
return
}

headBucket.insert(proof)
}

func (pc *proofsCache) insertInNewBucket(proof data.HeaderProofHandler) {
bucket := newProofBucket(pc.bucketSize)
bucket.insert(proof)

pc.proofsByNonceBuckets = append([]*proofNonceBucket{bucket}, pc.proofsByNonceBuckets...)
}

func (pc *proofsCache) cleanupProofsBehindNonce(nonce uint64) {
if nonce == 0 {
return
Expand All @@ -66,16 +82,22 @@ func (pc *proofsCache) cleanupProofsBehindNonce(nonce uint64) {
pc.mutProofsCache.Lock()
defer pc.mutProofsCache.Unlock()

proofsByNonce := make([]*proofNonceMapping, 0)
buckets := make([]*proofNonceBucket, 0)

for _, proofInfo := range pc.proofsByNonce {
if proofInfo.nonce < nonce {
delete(pc.proofsByHash, proofInfo.headerHash)
for _, bucket := range pc.proofsByNonceBuckets {
if nonce > bucket.maxNonce {
pc.cleanupProofsInBucket(bucket)
continue
}

proofsByNonce = append(proofsByNonce, proofInfo)
buckets = append(buckets, bucket)
}

pc.proofsByNonce = proofsByNonce
pc.proofsByNonceBuckets = buckets
}

func (pc *proofsCache) cleanupProofsInBucket(bucket *proofNonceBucket) {
for _, proofInfo := range bucket.proofsByNonce {
delete(pc.proofsByHash, proofInfo.headerHash)
}
}
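To make the bucket-level cleanup concrete, here is a rough usage sketch against the exported test API (again assuming the generateProof helper from the existing tests). With a bucket size of 10 and nonces 0 through 24, the first two buckets end up with maxNonce 9 and 19, so cleaning up behind nonce 20 drops both of them wholesale, together with their proofsByHash entries:

pc := proofscache.NewProofsCache(10)

for i := uint64(0); i < 25; i++ {
	proof := generateProof()
	proof.HeaderNonce = i
	proof.HeaderHash = []byte(fmt.Sprintf("hash_%d", i))
	pc.AddProof(proof)
}

pc.CleanupProofsBehindNonce(20)

// only the head bucket holding nonces 20..24 survives
fmt.Println(pc.FullProofsByNonceSize(), pc.ProofsByHashSize()) // 5 5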
66 changes: 66 additions & 0 deletions dataRetriever/dataPool/proofsCache/proofsCache_bench_test.go
@@ -0,0 +1,66 @@
package proofscache_test
Contributor: If we move the test to package proofscache, maybe we can drop some code from the export_test file. Can also stay as it is.
Contributor Author: I would keep it in the _test package since there are other useful functions from the test files, and we plan to make this component more generic and to export it in other packages as well (for headers pool).

import (
"fmt"
"testing"

proofscache "github.com/multiversx/mx-chain-go/dataRetriever/dataPool/proofsCache"
)

func Benchmark_AddProof_Bucket10_Pool1000(b *testing.B) {
benchmarkAddProof(b, 10, 1000)
}

func Benchmark_AddProof_Bucket100_Pool10000(b *testing.B) {
benchmarkAddProof(b, 100, 10000)
}

func Benchmark_AddProof_Bucket1000_Pool100000(b *testing.B) {
benchmarkAddProof(b, 1000, 100000)
}
Contributor (on lines +10 to +20): Maybe also add some output / statistics in the PR description.

func benchmarkAddProof(b *testing.B, bucketSize int, nonceRange int) {
pc := proofscache.NewProofsCache(bucketSize)

b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StopTimer()
proof := generateProof()
nonce := generateRandomNonce(int64(nonceRange))

proof.HeaderNonce = nonce
proof.HeaderHash = []byte("hash_" + fmt.Sprintf("%d", nonce))
b.StartTimer()

pc.AddProof(proof)
}
}

func Benchmark_CleanupProofs_Bucket10_Pool1000(b *testing.B) {
benchmarkCleanupProofs(b, 10, 1000)
}

func Benchmark_CleanupProofs_Bucket100_Pool10000(b *testing.B) {
benchmarkCleanupProofs(b, 100, 10000)
}

func Benchmark_CleanupProofs_Bucket1000_Pool100000(b *testing.B) {
benchmarkCleanupProofs(b, 1000, 100000)
}

func benchmarkCleanupProofs(b *testing.B, bucketSize int, nonceRange int) {
pc := proofscache.NewProofsCache(bucketSize)

for i := uint64(0); i < uint64(nonceRange); i++ {
proof := generateProof()
proof.HeaderNonce = i
proof.HeaderHash = []byte("hash_" + fmt.Sprintf("%d", i))

pc.AddProof(proof)
}

b.ResetTimer()
for i := 0; i < b.N; i++ {
pc.CleanupProofsBehindNonce(uint64(nonceRange))
}
}
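For the statistics requested above, these benchmarks can be run with standard Go tooling, e.g. go test -run '^$' -bench . -benchmem ./dataRetriever/dataPool/proofsCache/ (the -run '^$' part skips the regular unit tests; results will vary by machine).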