Skip to content
Merged
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions consensus/spos/bls/proxy/subroundsHandler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,7 @@ func getDefaultArgumentsSubroundHandler() (*SubroundsHandlerArgs, *spos.Consensu
consensusCore.SetEnableEpochsHandler(epochsEnable)
consensusCore.SetEquivalentProofsPool(&dataRetriever.ProofsPoolMock{})
consensusCore.SetEpochNotifier(epochNotifier)
consensusCore.SetInvalidSignersCache(&consensus.InvalidSignersCacheMock{})
handlerArgs.ConsensusCoreHandler = consensusCore

return handlerArgs, consensusCore
Expand Down
4 changes: 2 additions & 2 deletions consensus/spos/bls/v2/export_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -302,7 +302,7 @@ func (sr *subroundEndRound) ReceivedInvalidSignersInfo(cnsDta *consensus.Message
}

// VerifyInvalidSigners calls the unexported verifyInvalidSigners function
func (sr *subroundEndRound) VerifyInvalidSigners(invalidSigners []byte) error {
func (sr *subroundEndRound) VerifyInvalidSigners(invalidSigners []byte) ([]string, error) {
return sr.verifyInvalidSigners(invalidSigners)
}

Expand All @@ -313,7 +313,7 @@ func (sr *subroundEndRound) GetMinConsensusGroupIndexOfManagedKeys() int {

// CreateAndBroadcastInvalidSigners calls the unexported createAndBroadcastInvalidSigners function
func (sr *subroundEndRound) CreateAndBroadcastInvalidSigners(invalidSigners []byte) {
sr.createAndBroadcastInvalidSigners(invalidSigners)
sr.createAndBroadcastInvalidSigners(invalidSigners, nil)
}

// GetFullMessagesForInvalidSigners calls the unexported getFullMessagesForInvalidSigners function
Expand Down
55 changes: 35 additions & 20 deletions consensus/spos/bls/v2/subroundEndRound.go
Original file line number Diff line number Diff line change
Expand Up @@ -150,14 +150,21 @@ func (sr *subroundEndRound) receivedInvalidSignersInfo(_ context.Context, cnsDta
return false
}

err := sr.verifyInvalidSigners(cnsDta.InvalidSigners)
invalidSignersCache := sr.InvalidSignersCache()
if invalidSignersCache.HasInvalidSigners(cnsDta.BlockHeaderHash, cnsDta.InvalidSigners) {
return false
}

invalidSignersPubKeys, err := sr.verifyInvalidSigners(cnsDta.InvalidSigners)
if err != nil {
log.Trace("receivedInvalidSignersInfo.verifyInvalidSigners", "error", err.Error())
return false
}

log.Debug("step 3: invalid signers info has been evaluated")

invalidSignersCache.AddInvalidSigners(cnsDta.BlockHeaderHash, cnsDta.InvalidSigners, invalidSignersPubKeys)

sr.PeerHonestyHandler().ChangeScore(
messageSender,
spos.GetConsensusTopicID(sr.ShardCoordinator()),
Expand All @@ -167,32 +174,37 @@ func (sr *subroundEndRound) receivedInvalidSignersInfo(_ context.Context, cnsDta
return true
}

func (sr *subroundEndRound) verifyInvalidSigners(invalidSigners []byte) error {
func (sr *subroundEndRound) verifyInvalidSigners(invalidSigners []byte) ([]string, error) {
messages, err := sr.MessageSigningHandler().Deserialize(invalidSigners)
if err != nil {
return err
return nil, err
}

pubKeys := make([]string, 0, len(messages))
for _, msg := range messages {
err = sr.verifyInvalidSigner(msg)
if err != nil {
return err
pubKey, errVerify := sr.verifyInvalidSigner(msg)
if errVerify != nil {
return nil, errVerify
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thus, here we break & return, all good?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yes — it means we were not able to verify whether the provided invalid signers indeed had invalid signatures

}

if len(pubKey) > 0 {
pubKeys = append(pubKeys, pubKey)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So we only collect the public keys of the good signers? And if there's any invalid one, we collect nothing. Should we find a better name for the function e.g. findGoodSigners?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

not really — this method checks whether each reported invalid signer indeed provided an invalid signature, so this appends only the invalid signers we confirmed to have an invalid signature

}
}

return nil
return pubKeys, nil
}

func (sr *subroundEndRound) verifyInvalidSigner(msg p2p.MessageP2P) error {
func (sr *subroundEndRound) verifyInvalidSigner(msg p2p.MessageP2P) (string, error) {
err := sr.MessageSigningHandler().Verify(msg)
if err != nil {
return err
return "", err
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Note to ourselves - this is the raw byte pubkey, not the hex-encoded one (from what I understand). No change needed.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

correct

}

cnsMsg := &consensus.Message{}
err = sr.Marshalizer().Unmarshal(cnsMsg, msg.Data())
if err != nil {
return err
return "", err
}

err = sr.SigningHandler().VerifySingleSignature(cnsMsg.PubKey, cnsMsg.BlockHeaderHash, cnsMsg.SignatureShare)
Expand All @@ -203,9 +215,11 @@ func (sr *subroundEndRound) verifyInvalidSigner(msg p2p.MessageP2P) error {
"error", err.Error(),
)
sr.applyBlacklistOnNode(msg.Peer())

return string(cnsMsg.PubKey), nil
}

return nil
return "", nil
}

func (sr *subroundEndRound) applyBlacklistOnNode(peer core.PeerID) {
Expand Down Expand Up @@ -501,24 +515,23 @@ func (sr *subroundEndRound) handleInvalidSignersOnAggSigFail() ([]byte, []byte,
invalidPubKeys, err := sr.verifyNodesOnAggSigFail(ctx)
cancel()
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

not related to the changes from this PR, but calling this cancel() only after the intensive work doesn't seem right. Maybe move it up one line and call it in a defer?

if err != nil {
log.Debug("doEndRoundJobByNode.verifyNodesOnAggSigFail", "error", err.Error())
log.Debug("handleInvalidSignersOnAggSigFail.verifyNodesOnAggSigFail", "error", err.Error())
return nil, nil, err
}

_, err = sr.getFullMessagesForInvalidSigners(invalidPubKeys)
invalidSigners, err := sr.getFullMessagesForInvalidSigners(invalidPubKeys)
if err != nil {
log.Debug("doEndRoundJobByNode.getFullMessagesForInvalidSigners", "error", err.Error())
log.Debug("handleInvalidSignersOnAggSigFail.getFullMessagesForInvalidSigners", "error", err.Error())
return nil, nil, err
}

// TODO: handle invalid signers broadcast without flooding the network
// if len(invalidSigners) > 0 {
// sr.createAndBroadcastInvalidSigners(invalidSigners)
// }
if len(invalidSigners) > 0 {
sr.createAndBroadcastInvalidSigners(invalidSigners, invalidPubKeys)
}

bitmap, sig, err := sr.computeAggSigOnValidNodes()
if err != nil {
log.Debug("doEndRoundJobByNode.computeAggSigOnValidNodes", "error", err.Error())
log.Debug("handleInvalidSignersOnAggSigFail.computeAggSigOnValidNodes", "error", err.Error())
return nil, nil, err
}

Expand Down Expand Up @@ -618,7 +631,7 @@ func (sr *subroundEndRound) getRandomManagedKeyProofSender() string {
return randManagedKey
}

func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []byte) {
func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []byte, invalidSignersPubKeys []string) {
if !sr.ShouldConsiderSelfKeyInConsensus() {
return
}
Expand Down Expand Up @@ -646,6 +659,8 @@ func (sr *subroundEndRound) createAndBroadcastInvalidSigners(invalidSigners []by
invalidSigners,
)

sr.InvalidSignersCache().AddInvalidSigners(sr.GetData(), invalidSigners, invalidSignersPubKeys)

err = sr.BroadcastMessenger().BroadcastConsensusMessage(cnsMsg)
if err != nil {
log.Debug("doEndRoundJob.BroadcastConsensusMessage", "error", err.Error())
Expand Down
54 changes: 45 additions & 9 deletions consensus/spos/bls/v2/subroundEndRound_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1502,6 +1502,27 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) {
res := sr.ReceivedInvalidSignersInfo(&cnsData)
assert.False(t, res)
})
t.Run("invalid signers cache already has this message", func(t *testing.T) {
t.Parallel()

container := consensusMocks.InitConsensusCore()
invalidSignersCache := &consensusMocks.InvalidSignersCacheMock{
HasInvalidSignersCalled: func(headerHash []byte, invalidSigners []byte) bool {
return true
},
}
container.SetInvalidSignersCache(invalidSignersCache)

sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{})
cnsData := consensus.Message{
BlockHeaderHash: []byte("X"),
PubKey: []byte("A"),
InvalidSigners: []byte("invalidSignersData"),
}

res := sr.ReceivedInvalidSignersInfo(&cnsData)
assert.False(t, res)
})
t.Run("invalid signers data", func(t *testing.T) {
t.Parallel()

Expand All @@ -1528,6 +1549,13 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) {
t.Parallel()

container := consensusMocks.InitConsensusCore()
wasAddInvalidSignersCalled := false
invalidSignersCache := &consensusMocks.InvalidSignersCacheMock{
AddInvalidSignersCalled: func(headerHash []byte, invalidSigners []byte, invalidPublicKeys []string) {
wasAddInvalidSignersCalled = true
},
}
container.SetInvalidSignersCache(invalidSignersCache)

sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{})
sr.SetHeader(&block.HeaderV2{
Expand All @@ -1541,6 +1569,7 @@ func TestSubroundEndRound_ReceivedInvalidSignersInfo(t *testing.T) {

res := sr.ReceivedInvalidSignersInfo(&cnsData)
assert.True(t, res)
require.True(t, wasAddInvalidSignersCalled)
})
}

Expand All @@ -1552,7 +1581,6 @@ func TestVerifyInvalidSigners(t *testing.T) {

container := consensusMocks.InitConsensusCore()

expectedErr := errors.New("expected err")
messageSigningHandler := &mock.MessageSigningHandlerStub{
DeserializeCalled: func(messagesBytes []byte) ([]p2p.MessageP2P, error) {
return nil, expectedErr
Expand All @@ -1563,7 +1591,7 @@ func TestVerifyInvalidSigners(t *testing.T) {

sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{})

err := sr.VerifyInvalidSigners([]byte{})
_, err := sr.VerifyInvalidSigners([]byte{})
require.Equal(t, expectedErr, err)
})

Expand All @@ -1577,7 +1605,6 @@ func TestVerifyInvalidSigners(t *testing.T) {
}}
invalidSignersBytes, _ := container.Marshalizer().Marshal(invalidSigners)

expectedErr := errors.New("expected err")
messageSigningHandler := &mock.MessageSigningHandlerStub{
DeserializeCalled: func(messagesBytes []byte) ([]p2p.MessageP2P, error) {
require.Equal(t, invalidSignersBytes, messagesBytes)
Expand All @@ -1592,7 +1619,7 @@ func TestVerifyInvalidSigners(t *testing.T) {

sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{})

err := sr.VerifyInvalidSigners(invalidSignersBytes)
_, err := sr.VerifyInvalidSigners(invalidSignersBytes)
require.Equal(t, expectedErr, err)
})

Expand Down Expand Up @@ -1634,7 +1661,7 @@ func TestVerifyInvalidSigners(t *testing.T) {

sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{})

err := sr.VerifyInvalidSigners(invalidSignersBytes)
_, err := sr.VerifyInvalidSigners(invalidSignersBytes)
require.Nil(t, err)
require.True(t, wasCalled)
})
Expand Down Expand Up @@ -1662,7 +1689,7 @@ func TestVerifyInvalidSigners(t *testing.T) {

sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{})

err := sr.VerifyInvalidSigners(invalidSignersBytes)
_, err := sr.VerifyInvalidSigners(invalidSignersBytes)
require.Nil(t, err)
})
}
Expand Down Expand Up @@ -1704,25 +1731,34 @@ func TestSubroundEndRound_CreateAndBroadcastInvalidSigners(t *testing.T) {

expectedInvalidSigners := []byte("invalid signers")

wasCalled := false
wasBroadcastConsensusMessageCalled := false
container := consensusMocks.InitConsensusCore()
messenger := &consensusMocks.BroadcastMessengerMock{
BroadcastConsensusMessageCalled: func(message *consensus.Message) error {
assert.Equal(t, expectedInvalidSigners, message.InvalidSigners)
wasCalled = true
wasBroadcastConsensusMessageCalled = true
wg.Done()
return nil
},
}
container.SetBroadcastMessenger(messenger)

wasAddInvalidSignersCalled := false
invalidSignersCache := &consensusMocks.InvalidSignersCacheMock{
AddInvalidSignersCalled: func(headerHash []byte, invalidSigners []byte, invalidPublicKeys []string) {
wasAddInvalidSignersCalled = true
},
}
container.SetInvalidSignersCache(invalidSignersCache)
sr := initSubroundEndRoundWithContainer(container, &statusHandler.AppStatusHandlerStub{})
sr.SetSelfPubKey("A")

sr.CreateAndBroadcastInvalidSigners(expectedInvalidSigners)

wg.Wait()

require.True(t, wasCalled)
require.True(t, wasBroadcastConsensusMessageCalled)
require.True(t, wasAddInvalidSignersCalled)
})
}

Expand Down
1 change: 1 addition & 0 deletions consensus/spos/bls/v2/subroundStartRound.go
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,7 @@ func (sr *subroundStartRound) doStartRoundJob(_ context.Context) bool {
topic := spos.GetConsensusTopicID(sr.ShardCoordinator())
sr.GetAntiFloodHandler().ResetForTopic(topic)
sr.worker.ResetConsensusMessages()
sr.worker.ResetInvalidSignersCache()

return true
}
Expand Down
13 changes: 13 additions & 0 deletions consensus/spos/consensusCore.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ type ConsensusCore struct {
enableEpochsHandler common.EnableEpochsHandler
equivalentProofsPool consensus.EquivalentProofsPool
epochNotifier process.EpochNotifier
invalidSignersCache InvalidSignersCache
}

// ConsensusCoreArgs store all arguments that are needed to create a ConsensusCore object
Expand Down Expand Up @@ -72,6 +73,7 @@ type ConsensusCoreArgs struct {
EnableEpochsHandler common.EnableEpochsHandler
EquivalentProofsPool consensus.EquivalentProofsPool
EpochNotifier process.EpochNotifier
InvalidSignersCache InvalidSignersCache
}

// NewConsensusCore creates a new ConsensusCore instance
Expand Down Expand Up @@ -104,6 +106,7 @@ func NewConsensusCore(
enableEpochsHandler: args.EnableEpochsHandler,
equivalentProofsPool: args.EquivalentProofsPool,
epochNotifier: args.EpochNotifier,
invalidSignersCache: args.InvalidSignersCache,
}

err := ValidateConsensusCore(consensusCore)
Expand Down Expand Up @@ -239,6 +242,11 @@ func (cc *ConsensusCore) EquivalentProofsPool() consensus.EquivalentProofsPool {
return cc.equivalentProofsPool
}

// InvalidSignersCache returns the invalid signers cache component
func (cc *ConsensusCore) InvalidSignersCache() InvalidSignersCache {
return cc.invalidSignersCache
}

// SetBlockchain sets blockchain handler
func (cc *ConsensusCore) SetBlockchain(blockChain data.ChainHandler) {
cc.blockChain = blockChain
Expand Down Expand Up @@ -364,6 +372,11 @@ func (cc *ConsensusCore) SetEpochNotifier(epochNotifier process.EpochNotifier) {
cc.epochNotifier = epochNotifier
}

// SetInvalidSignersCache sets the invalid signers cache
func (cc *ConsensusCore) SetInvalidSignersCache(cache InvalidSignersCache) {
cc.invalidSignersCache = cache
}

// IsInterfaceNil returns true if there is no value under the interface
func (cc *ConsensusCore) IsInterfaceNil() bool {
return cc == nil
Expand Down
3 changes: 3 additions & 0 deletions consensus/spos/consensusCoreValidator.go
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,9 @@ func ValidateConsensusCore(container ConsensusCoreHandler) error {
if check.IfNil(container.EpochStartRegistrationHandler()) {
return ErrNilEpochStartNotifier
}
if check.IfNil(container.InvalidSignersCache()) {
return ErrNilInvalidSignersCache
}

return nil
}
13 changes: 13 additions & 0 deletions consensus/spos/consensusCoreValidator_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@ func initConsensusDataContainer() *spos.ConsensusCore {
enableEpochsHandler := &enableEpochsHandlerMock.EnableEpochsHandlerStub{}
proofsPool := &dataRetriever.ProofsPoolMock{}
epochNotifier := &epochNotifierMock.EpochNotifierStub{}
invalidSignersCache := &consensusMocks.InvalidSignersCacheMock{}

consensusCore, _ := spos.NewConsensusCore(&spos.ConsensusCoreArgs{
BlockChain: blockChain,
Expand Down Expand Up @@ -73,6 +74,7 @@ func initConsensusDataContainer() *spos.ConsensusCore {
EnableEpochsHandler: enableEpochsHandler,
EquivalentProofsPool: proofsPool,
EpochNotifier: epochNotifier,
InvalidSignersCache: invalidSignersCache,
})

return consensusCore
Expand Down Expand Up @@ -372,6 +374,17 @@ func TestConsensusContainerValidator_ValidateNilEpochStartRegistrationHandlerSho
assert.Equal(t, spos.ErrNilEpochStartNotifier, err)
}

func TestConsensusContainerValidator_ValidateNilInvalidSignersCacheShouldFail(t *testing.T) {
t.Parallel()

container := initConsensusDataContainer()
container.SetInvalidSignersCache(nil)

err := spos.ValidateConsensusCore(container)

assert.Equal(t, spos.ErrNilInvalidSignersCache, err)
}

func TestConsensusContainerValidator_ShouldWork(t *testing.T) {
t.Parallel()

Expand Down
Loading