From c2da67db92f8d1f00dc4525b2e05aa4e09e6d28e Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Tue, 6 Aug 2024 14:22:13 -0700 Subject: [PATCH 01/98] Bump avalanchego to master --- go.mod | 4 +-- go.sum | 8 ++--- params/config.go | 6 ++-- params/network_upgrades.go | 7 +++-- peer/network_test.go | 3 +- plugin/evm/syncervm_test.go | 5 ++-- plugin/evm/tx_gossip_test.go | 16 +++++----- plugin/evm/vm_test.go | 17 ++++++----- plugin/evm/vm_upgrade_bytes_test.go | 9 +++--- plugin/evm/vm_warp_test.go | 5 ++-- precompile/contracts/warp/predicate_test.go | 7 +++-- scripts/versions.sh | 2 +- tests/load/load_test.go | 2 ++ tests/warp/warp_test.go | 33 ++++++++++++++------- utils/snow.go | 4 +-- 15 files changed, 76 insertions(+), 52 deletions(-) diff --git a/go.mod b/go.mod index 7956a35752..7a5de639cd 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21.12 require ( github.com/VictoriaMetrics/fastcache v1.12.1 - github.com/ava-labs/avalanchego v1.11.11-0.20240729205337-a0f7e422bb84 + github.com/ava-labs/avalanchego v1.11.11-0.20240805202431-479145a6602d github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 @@ -54,7 +54,7 @@ require ( require ( github.com/DataDog/zstd v1.5.2 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect - github.com/ava-labs/coreth v0.13.7 // indirect + github.com/ava-labs/coreth v0.13.8-0.20240802110637-b3e5088d062d // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.10.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect diff --git a/go.sum b/go.sum index a33e68a2f1..549db74bb0 100644 --- a/go.sum +++ b/go.sum @@ -56,10 +56,10 @@ github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/avalanchego v1.11.11-0.20240729205337-a0f7e422bb84 h1:AmPZLlnVREbJ/viK/hDTIVn1bqX8QTB2CFtrBxHwnsw= -github.com/ava-labs/avalanchego v1.11.11-0.20240729205337-a0f7e422bb84/go.mod h1:POgZPryqe80OeHCDNrXrPOKoFre736iFuMgmUBeKaLc= -github.com/ava-labs/coreth v0.13.7 h1:k8T9u/ROifl8f7oXjHRc1KvSISRl9txvy7gGVmHEz6g= -github.com/ava-labs/coreth v0.13.7/go.mod h1:tXDujonxXFOF6oK5HS2EmgtSXJK3Gy6RpZxb5WzR9rM= +github.com/ava-labs/avalanchego v1.11.11-0.20240805202431-479145a6602d h1:T8sDX5uo7zSEjwDtVccS1WtzuC3pRXs9NXYbmGGagJ4= +github.com/ava-labs/avalanchego v1.11.11-0.20240805202431-479145a6602d/go.mod h1:9e0UPXJboybmgFjeTj+SFbK4ugbrdG4t68VdiUW5oQ8= +github.com/ava-labs/coreth v0.13.8-0.20240802110637-b3e5088d062d h1:klPTcKVvqfA2KSKaRvQAO56Pd4XAqGhwgMTQ6/W+w7w= +github.com/ava-labs/coreth v0.13.8-0.20240802110637-b3e5088d062d/go.mod h1:tXDujonxXFOF6oK5HS2EmgtSXJK3Gy6RpZxb5WzR9rM= github.com/aymerick/raymond 
v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= diff --git a/params/config.go b/params/config.go index 59ae91a869..356b208f2e 100644 --- a/params/config.go +++ b/params/config.go @@ -33,8 +33,8 @@ import ( "math/big" "time" + "github.com/ava-labs/avalanchego/upgrade" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/subnet-evm/commontype" "github.com/ava-labs/subnet-evm/precompile/modules" "github.com/ava-labs/subnet-evm/precompile/precompileconfig" @@ -57,7 +57,7 @@ var ( DynamicFeeExtraDataSize = 80 RollupWindow uint64 = 10 - DefaultGenesisTime = version.DefaultUpgradeTime + DefaultGenesisTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC) DefaultFeeConfig = commontype.FeeConfig{ GasLimit: big.NewInt(8_000_000), @@ -110,7 +110,7 @@ var ( PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), - CancunTime: utils.TimeToNewUint64(version.GetEUpgradeTime(constants.UnitTestID)), + CancunTime: utils.TimeToNewUint64(upgrade.GetConfig(constants.UnitTestID).EtnaTime), NetworkUpgrades: getDefaultNetworkUpgrades(constants.UnitTestID), GenesisPrecompiles: Precompiles{}, UpgradeConfig: UpgradeConfig{}, diff --git a/params/network_upgrades.go b/params/network_upgrades.go index 7dcf15e911..013367c947 100644 --- a/params/network_upgrades.go +++ b/params/network_upgrades.go @@ -7,7 +7,7 @@ import ( "fmt" "reflect" - "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/upgrade" "github.com/ava-labs/subnet-evm/utils" ) @@ -143,10 +143,11 @@ func (n *NetworkUpgrades) GetAvalancheRules(time uint64) AvalancheRules { // getDefaultNetworkUpgrades returns the network upgrades for the specified network ID. // These should not return nil values. 
func getDefaultNetworkUpgrades(networkID uint32) NetworkUpgrades { + agoUpgrade := upgrade.GetConfig(networkID) return NetworkUpgrades{ SubnetEVMTimestamp: utils.NewUint64(0), - DurangoTimestamp: utils.TimeToNewUint64(version.GetDurangoTime(networkID)), - EUpgradeTimestamp: utils.TimeToNewUint64(version.GetEUpgradeTime(networkID)), + DurangoTimestamp: utils.TimeToNewUint64(agoUpgrade.DurangoTime), + EUpgradeTimestamp: utils.TimeToNewUint64(agoUpgrade.EtnaTime), } } diff --git a/peer/network_test.go b/peer/network_test.go index 89419314cc..2ebcf6775a 100644 --- a/peer/network_test.go +++ b/peer/network_test.go @@ -14,6 +14,7 @@ import ( "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/enginetest" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" ethcommon "github.com/ethereum/go-ethereum/common" @@ -510,7 +511,7 @@ func TestHandleInvalidMessages(t *testing.T) { nodeID := ids.GenerateTestNodeID() requestID := uint32(1) - sender := &common.SenderTest{ + sender := &enginetest.Sender{ SendAppErrorF: func(context.Context, ids.NodeID, uint32, int32, string) error { return nil }, diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index a81474c072..533740666b 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -20,6 +20,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/enginetest" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/utils/set" @@ -124,7 +125,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { test.expectedErr = nil syncDisabledVM := &VM{} - appSender := &commonEng.SenderTest{T: t} + appSender := &enginetest.Sender{T: t} appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } appSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error { nodeID, hasItem := nodeSet.Pop() @@ -368,7 +369,7 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest, numBlocks int) *s // off of a server VM. 
type syncVMSetup struct { serverVM *VM - serverAppSender *commonEng.SenderTest + serverAppSender *enginetest.Sender fundedAccounts map[*keystore.Key]*types.StateAccount diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go index 1f04e223ce..36c47ffde5 100644 --- a/plugin/evm/tx_gossip_test.go +++ b/plugin/evm/tx_gossip_test.go @@ -18,7 +18,9 @@ import ( "github.com/ava-labs/avalanchego/proto/pb/sdk" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/enginetest" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/snow/validators/validatorstest" agoUtils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" @@ -35,10 +37,10 @@ func TestEthTxGossip(t *testing.T) { require := require.New(t) ctx := context.Background() snowCtx := utils.TestSnowContext() - validatorState := &validators.TestState{} + validatorState := &validatorstest.State{} snowCtx.ValidatorState = validatorState - responseSender := &common.FakeSender{ + responseSender := &enginetest.SenderStub{ SentAppResponse: make(chan []byte, 1), } vm := &VM{ @@ -54,7 +56,7 @@ func TestEthTxGossip(t *testing.T) { nil, make(chan common.Message), nil, - &common.SenderTest{}, + &enginetest.Sender{}, )) require.NoError(vm.SetState(ctx, snow.NormalOp)) @@ -63,7 +65,7 @@ func TestEthTxGossip(t *testing.T) { }() // sender for the peer requesting gossip from [vm] - peerSender := &common.FakeSender{ + peerSender := &enginetest.SenderStub{ SentAppRequest: make(chan []byte, 1), } @@ -153,7 +155,7 @@ func TestEthTxPushGossipOutbound(t *testing.T) { require := require.New(t) ctx := context.Background() snowCtx := utils.TestSnowContext() - snowCtx.ValidatorState = &validators.TestState{ + snowCtx.ValidatorState = &validatorstest.State{ GetCurrentHeightF: func(context.Context) (uint64, error) { return 0, nil }, @@ -161,7 +163,7 @@ func TestEthTxPushGossipOutbound(t *testing.T) { return nil, nil }, } - sender := &common.FakeSender{ + sender := &enginetest.SenderStub{ SentAppGossip: make(chan []byte, 1), } @@ -217,7 +219,7 @@ func TestEthTxPushGossipInbound(t *testing.T) { ctx := context.Background() snowCtx := utils.TestSnowContext() - sender := &common.SenderTest{} + sender := &enginetest.Sender{} vm := &VM{ ethTxPullGossiper: gossip.NoOpGossiper{}, } diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 2a0e5849c2..758fbbfd1d 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -33,7 +33,8 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/snow/engine/enginetest" + "github.com/ava-labs/avalanchego/snow/validators/validatorstest" avalancheConstants "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/formatting" @@ -152,7 +153,7 @@ func NewContext() *snow.Context { _ = aliaser.Alias(testCChainID, testCChainID.String()) _ = 
aliaser.Alias(testXChainID, "X") _ = aliaser.Alias(testXChainID, testXChainID.String()) - ctx.ValidatorState = &validators.TestState{ + ctx.ValidatorState = &validatorstest.State{ GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { subnetID, ok := map[ids.ID]ids.ID{ avalancheConstants.PlatformChainID: avalancheConstants.PrimaryNetworkID, @@ -221,13 +222,15 @@ func GenesisVM(t *testing.T, genesisJSON string, configJSON string, upgradeJSON string, -) (chan commonEng.Message, - *VM, database.Database, - *commonEng.SenderTest, +) ( + chan commonEng.Message, + *VM, + database.Database, + *enginetest.Sender, ) { vm := &VM{} ctx, dbManager, genesisBytes, issuer, _ := setupGenesis(t, genesisJSON) - appSender := &commonEng.SenderTest{T: t} + appSender := &enginetest.Sender{T: t} appSender.CantSendAppGossip = true appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } err := vm.Initialize( @@ -1934,7 +1937,7 @@ func TestConfigureLogLevel(t *testing.T) { t.Run(test.name, func(t *testing.T) { vm := &VM{} ctx, dbManager, genesisBytes, issuer, _ := setupGenesis(t, test.genesisJSON) - appSender := &commonEng.SenderTest{T: t} + appSender := &enginetest.Sender{T: t} appSender.CantSendAppGossip = true appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } err := vm.Initialize( diff --git a/plugin/evm/vm_upgrade_bytes_test.go b/plugin/evm/vm_upgrade_bytes_test.go index 6b0d4ad7a4..85d8d84885 100644 --- a/plugin/evm/vm_upgrade_bytes_test.go +++ b/plugin/evm/vm_upgrade_bytes_test.go @@ -14,7 +14,8 @@ import ( "github.com/ava-labs/avalanchego/snow" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/snow/engine/enginetest" + "github.com/ava-labs/avalanchego/upgrade" "github.com/ava-labs/avalanchego/vms/components/chain" "github.com/ava-labs/subnet-evm/core" "github.com/ava-labs/subnet-evm/core/types" @@ -32,7 +33,7 @@ import ( ) var ( - DefaultEUpgradeTime = uint64(version.GetEUpgradeTime(testNetworkID).Unix()) + DefaultEUpgradeTime = uint64(upgrade.GetConfig(testNetworkID).EtnaTime.Unix()) ) func TestVMUpgradeBytesPrecompile(t *testing.T) { @@ -183,7 +184,7 @@ func TestNetworkUpgradesOverriden(t *testing.T) { vm := &VM{} ctx, dbManager, genesisBytes, issuer, _ := setupGenesis(t, string(genesisBytes)) - appSender := &commonEng.SenderTest{T: t} + appSender := &enginetest.Sender{T: t} appSender.CantSendAppGossip = true appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } err = vm.Initialize( @@ -212,7 +213,7 @@ func TestNetworkUpgradesOverriden(t *testing.T) { require.False(t, vm.chainConfig.IsSubnetEVM(0)) require.True(t, vm.chainConfig.IsSubnetEVM(2)) require.False(t, vm.chainConfig.IsDurango(0)) - require.False(t, vm.chainConfig.IsDurango(uint64(version.DefaultUpgradeTime.Unix()))) + require.False(t, vm.chainConfig.IsDurango(uint64(params.DefaultGenesisTime.Unix()))) require.True(t, vm.chainConfig.IsDurango(1607144402)) } diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index d90826a158..d5c46e540c 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -15,6 +15,7 @@ import ( "github.com/ava-labs/avalanchego/ids" 
"github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/snow/validators/validatorstest" avagoUtils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/set" @@ -278,7 +279,7 @@ func testWarpVMTransaction(t *testing.T, unsignedMessage *avalancheWarp.Unsigned minimumValidPChainHeight := uint64(10) getValidatorSetTestErr := errors.New("can't get validator set test error") - vm.ctx.ValidatorState = &validators.TestState{ + vm.ctx.ValidatorState = &validatorstest.State{ // TODO: test both Primary Network / C-Chain and non-Primary Network GetSubnetIDF: func(ctx context.Context, chainID ids.ID) (ids.ID, error) { return ids.Empty, nil @@ -448,7 +449,7 @@ func TestReceiveWarpMessage(t *testing.T) { minimumValidPChainHeight := uint64(10) getValidatorSetTestErr := errors.New("can't get validator set test error") - vm.ctx.ValidatorState = &validators.TestState{ + vm.ctx.ValidatorState = &validatorstest.State{ GetSubnetIDF: func(ctx context.Context, chainID ids.ID) (ids.ID, error) { return ids.Empty, nil }, diff --git a/precompile/contracts/warp/predicate_test.go b/precompile/contracts/warp/predicate_test.go index 2b293b97a9..113db6fc43 100644 --- a/precompile/contracts/warp/predicate_test.go +++ b/precompile/contracts/warp/predicate_test.go @@ -13,6 +13,7 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/snow/validators/validatorstest" agoUtils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" @@ -197,7 +198,7 @@ func createSnowCtx(validatorRanges []validatorRange) *snow.Context { } snowCtx := utils.TestSnowContext() - state := &validators.TestState{ + state := &validatorstest.State{ GetSubnetIDF: func(ctx context.Context, chainID ids.ID) (ids.ID, error) { return sourceSubnetID, nil }, @@ -266,7 +267,7 @@ func TestWarpMessageFromPrimaryNetwork(t *testing.T) { snowCtx.ChainID = ids.GenerateTestID() snowCtx.CChainID = cChainID snowCtx.NetworkID = networkID - snowCtx.ValidatorState = &validators.TestState{ + snowCtx.ValidatorState = &validatorstest.State{ GetSubnetIDF: func(ctx context.Context, chainID ids.ID) (ids.ID, error) { require.Equal(chainID, cChainID) return constants.PrimaryNetworkID, nil // Return Primary Network SubnetID @@ -666,7 +667,7 @@ func initWarpPredicateTests() { snowCtx := utils.TestSnowContext() snowCtx.NetworkID = networkID - state := &validators.TestState{ + state := &validatorstest.State{ GetSubnetIDF: func(ctx context.Context, chainID ids.ID) (ids.ID, error) { return sourceSubnetID, nil }, diff --git a/scripts/versions.sh b/scripts/versions.sh index 4bb7ec71b9..a1bcaf654f 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -4,7 +4,7 @@ # shellcheck disable=SC2034 # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'v1.11.10'} +AVALANCHE_VERSION=${AVALANCHE_VERSION:-'479145a6602dfc6263c3d7842d26d7c7be7d5991'} GINKGO_VERSION=${GINKGO_VERSION:-'v2.2.0'} # This won't be used, but it's here to make code 
syncs easier diff --git a/tests/load/load_test.go b/tests/load/load_test.go index c8f80915c4..aa5a14783b 100644 --- a/tests/load/load_test.go +++ b/tests/load/load_test.go @@ -53,10 +53,12 @@ var _ = ginkgo.Describe("[Load Simulator]", ginkgo.Ordered, func() { var env *e2e.TestEnvironment ginkgo.BeforeAll(func() { + tc := e2e.NewTestContext() genesisPath := filepath.Join(repoRootPath, "tests/load/genesis/genesis.json") nodes := utils.NewTmpnetNodes(nodeCount) env = e2e.NewTestEnvironment( + tc, flagVars, utils.NewTmpnetNetwork( "subnet-evm-small-load", diff --git a/tests/warp/warp_test.go b/tests/warp/warp_test.go index 54f27a47fc..57567ae451 100644 --- a/tests/warp/warp_test.go +++ b/tests/warp/warp_test.go @@ -91,9 +91,11 @@ func TestE2E(t *testing.T) { var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // Run only once in the first ginkgo process + tc := e2e.NewTestContext() nodes := utils.NewTmpnetNodes(tmpnet.DefaultNodeCount) env := e2e.NewTestEnvironment( + tc, flagVars, utils.NewTmpnetNetwork( "subnet-evm-warp-e2e", @@ -109,13 +111,14 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // Run in every ginkgo process require := require.New(ginkgo.GinkgoT()) + tc := e2e.NewTestContext() // Initialize the local test environment from the global state if len(envBytes) > 0 { - e2e.InitSharedTestEnvironment(envBytes) + e2e.InitSharedTestEnvironment(ginkgo.GinkgoT(), envBytes) } - network := e2e.Env.GetNetwork() + network := e2e.GetEnv(tc).GetNetwork() // By default all nodes are validating all subnets validatorURIs := make([]string, len(network.Nodes)) @@ -142,7 +145,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { } infoClient := info.NewClient(network.Nodes[0].URI) - cChainBlockchainID, err := infoClient.GetBlockchainID(e2e.DefaultContext(), "C") + cChainBlockchainID, err := infoClient.GetBlockchainID(tc.DefaultContext(), "C") require.NoError(err) cChainSubnetDetails = &Subnet{ @@ -155,7 +158,8 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { var _ = ginkgo.Describe("[Warp]", func() { testFunc := func(sendingSubnet *Subnet, receivingSubnet *Subnet) { - w := newWarpTest(e2e.DefaultContext(), sendingSubnet, receivingSubnet) + tc := e2e.NewTestContext() + w := newWarpTest(tc.DefaultContext(), sendingSubnet, receivingSubnet) log.Info("Sending message from A to B") w.sendMessageFromSendingSubnet() @@ -294,7 +298,8 @@ func (w *warpTest) getBlockHashAndNumberFromTxReceipt(ctx context.Context, clien } func (w *warpTest) sendMessageFromSendingSubnet() { - ctx := e2e.DefaultContext() + tc := e2e.NewTestContext() + ctx := tc.DefaultContext() require := require.New(ginkgo.GinkgoT()) client := w.sendingSubnetClients[0] @@ -375,7 +380,8 @@ func (w *warpTest) sendMessageFromSendingSubnet() { func (w *warpTest) aggregateSignaturesViaAPI() { require := require.New(ginkgo.GinkgoT()) - ctx := e2e.DefaultContext() + tc := e2e.NewTestContext() + ctx := tc.DefaultContext() warpAPIs := make(map[ids.NodeID]warpBackend.Client, len(w.sendingSubnetURIs)) for _, uri := range w.sendingSubnetURIs { @@ -434,7 +440,8 @@ func (w *warpTest) aggregateSignaturesViaAPI() { func (w *warpTest) aggregateSignatures() { require := require.New(ginkgo.GinkgoT()) - ctx := e2e.DefaultContext() + tc := e2e.NewTestContext() + ctx := tc.DefaultContext() // Verify that the signature aggregation matches the results of manually constructing the warp message client, err := warpBackend.NewClient(w.sendingSubnetURIs[0], w.sendingSubnet.BlockchainID.String()) @@ -457,7 +464,8 @@ func (w *warpTest) 
aggregateSignatures() { func (w *warpTest) deliverAddressedCallToReceivingSubnet() { require := require.New(ginkgo.GinkgoT()) - ctx := e2e.DefaultContext() + tc := e2e.NewTestContext() + ctx := tc.DefaultContext() client := w.receivingSubnetClients[0] log.Info("Subscribing to new heads") @@ -511,7 +519,8 @@ func (w *warpTest) deliverAddressedCallToReceivingSubnet() { func (w *warpTest) deliverBlockHashPayload() { require := require.New(ginkgo.GinkgoT()) - ctx := e2e.DefaultContext() + tc := e2e.NewTestContext() + ctx := tc.DefaultContext() client := w.receivingSubnetClients[0] log.Info("Subscribing to new heads") @@ -565,7 +574,8 @@ func (w *warpTest) deliverBlockHashPayload() { func (w *warpTest) executeHardHatTest() { require := require.New(ginkgo.GinkgoT()) - ctx := e2e.DefaultContext() + tc := e2e.NewTestContext() + ctx := tc.DefaultContext() client := w.sendingSubnetClients[0] log.Info("Subscribing to new heads") @@ -593,7 +603,8 @@ func (w *warpTest) executeHardHatTest() { func (w *warpTest) warpLoad() { require := require.New(ginkgo.GinkgoT()) - ctx := e2e.DefaultContext() + tc := e2e.NewTestContext() + ctx := tc.DefaultContext() var ( numWorkers = len(w.sendingSubnetClients) diff --git a/utils/snow.go b/utils/snow.go index 96c13708d3..92a1d236c1 100644 --- a/utils/snow.go +++ b/utils/snow.go @@ -7,7 +7,7 @@ import ( "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/snow/validators/validatorstest" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -28,6 +28,6 @@ func TestSnowContext() *snow.Context { BCLookup: ids.NewAliaser(), Metrics: metrics.NewMultiGatherer(), ChainDataDir: "", - ValidatorState: &validators.TestState{}, + ValidatorState: &validatorstest.State{}, } } From 33466ce39e99fc9d2da7d1f0b7cc7c7817046617 Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Tue, 6 Aug 2024 16:22:04 -0700 Subject: [PATCH 02/98] always sign uptime messages (testing branch) --- go.mod | 2 +- go.sum | 4 +- plugin/evm/vm.go | 1 + plugin/evm/vm_warp_test.go | 6 +-- warp/backend.go | 54 +++++++++++++++++-- warp/backend_test.go | 12 ++--- warp/handlers/signature_request.go | 2 +- warp/handlers/signature_request_p2p.go | 20 ++++--- warp/handlers/signature_request_p2p_test.go | 4 +- warp/handlers/signature_request_test.go | 4 +- warp/handlers/validator_uptime_handler.go | 38 +++++++++++++ .../handlers/validator_uptime_handler_test.go | 35 ++++++++++++ warp/service.go | 2 +- 13 files changed, 154 insertions(+), 30 deletions(-) create mode 100644 warp/handlers/validator_uptime_handler.go create mode 100644 warp/handlers/validator_uptime_handler_test.go diff --git a/go.mod b/go.mod index 7a5de639cd..77f454cf97 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21.12 require ( github.com/VictoriaMetrics/fastcache v1.12.1 - github.com/ava-labs/avalanchego v1.11.11-0.20240805202431-479145a6602d + github.com/ava-labs/avalanchego v1.11.11-0.20240806190314-3e244010896c github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 549db74bb0..7a9622bc39 100644 --- a/go.sum +++ 
b/go.sum @@ -56,8 +56,8 @@ github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/avalanchego v1.11.11-0.20240805202431-479145a6602d h1:T8sDX5uo7zSEjwDtVccS1WtzuC3pRXs9NXYbmGGagJ4= -github.com/ava-labs/avalanchego v1.11.11-0.20240805202431-479145a6602d/go.mod h1:9e0UPXJboybmgFjeTj+SFbK4ugbrdG4t68VdiUW5oQ8= +github.com/ava-labs/avalanchego v1.11.11-0.20240806190314-3e244010896c h1:cORLkxcmTX41CRH5ugc+hd+GphyxNnDU1Qy4ShaP3rY= +github.com/ava-labs/avalanchego v1.11.11-0.20240806190314-3e244010896c/go.mod h1:9e0UPXJboybmgFjeTj+SFbK4ugbrdG4t68VdiUW5oQ8= github.com/ava-labs/coreth v0.13.8-0.20240802110637-b3e5088d062d h1:klPTcKVvqfA2KSKaRvQAO56Pd4XAqGhwgMTQ6/W+w7w= github.com/ava-labs/coreth v0.13.8-0.20240802110637-b3e5088d062d/go.mod h1:tXDujonxXFOF6oK5HS2EmgtSXJK3Gy6RpZxb5WzR9rM= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index ad380630e4..b12c99128e 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -497,6 +497,7 @@ func (vm *VM) Initialize( if err != nil { return err } + vm.warpBackend.AddMessageValidator(&handlers.ValidatorUptimeHandler{}) // clear warpdb on initialization if config enabled if vm.config.PruneWarpDB { diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index d5c46e540c..16f9893d6f 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -112,7 +112,7 @@ func TestSendWarpMessage(t *testing.T) { unsignedMessageID := unsignedMessage.ID() // Verify the signature cannot be fetched before the block is accepted - _, err = vm.warpBackend.GetMessageSignature(unsignedMessageID) + _, err = vm.warpBackend.GetMessageSignatureByID(unsignedMessageID) require.Error(err) _, err = vm.warpBackend.GetBlockSignature(blk.ID()) require.Error(err) @@ -122,7 +122,7 @@ func TestSendWarpMessage(t *testing.T) { vm.blockChain.DrainAcceptorQueue() // Verify the message signature after accepting the block. - rawSignatureBytes, err := vm.warpBackend.GetMessageSignature(unsignedMessageID) + rawSignatureBytes, err := vm.warpBackend.GetMessageSignatureByID(unsignedMessageID) require.NoError(err) blsSignature, err := bls.SignatureFromBytes(rawSignatureBytes[:]) require.NoError(err) @@ -595,7 +595,7 @@ func TestMessageSignatureRequestsToVM(t *testing.T) { // Add the known message and get its signature to confirm. 
err = vm.warpBackend.AddMessage(warpMessage) require.NoError(t, err) - signature, err := vm.warpBackend.GetMessageSignature(warpMessage.ID()) + signature, err := vm.warpBackend.GetMessageSignatureByID(warpMessage.ID()) require.NoError(t, err) tests := map[string]struct { diff --git a/warp/backend.go b/warp/backend.go index 7e7377ad57..59b6283bd0 100644 --- a/warp/backend.go +++ b/warp/backend.go @@ -30,14 +30,28 @@ type BlockClient interface { GetAcceptedBlock(ctx context.Context, blockID ids.ID) (snowman.Block, error) } +type MessageValidator interface { + // If the validator returns nil, the message is considered valid and the + // backend will sign it. + ValidateMessage(*avalancheWarp.UnsignedMessage) error +} + // Backend tracks signature-eligible warp messages and provides an interface to fetch them. // The backend is also used to query for warp message signatures by the signature request handler. type Backend interface { // AddMessage signs [unsignedMessage] and adds it to the warp backend database AddMessage(unsignedMessage *avalancheWarp.UnsignedMessage) error - // GetMessageSignature returns the signature of the requested message hash. - GetMessageSignature(messageID ids.ID) ([bls.SignatureLen]byte, error) + // AddMessageValidator adds a validator to the backend. The backend will sign + // messages that pass any of the validators, in addition to those known in the db. + AddMessageValidator(validator MessageValidator) + + // GetMessageSignatureByID returns the signature of the requested message. + GetMessageSignature(message *avalancheWarp.UnsignedMessage) ([bls.SignatureLen]byte, error) + + // GetMessageSignatureByID returns the signature of the requested message hash. + // TODO: should we deprecate this method? + GetMessageSignatureByID(messageID ids.ID) ([bls.SignatureLen]byte, error) // GetBlockSignature returns the signature of the requested message hash. GetBlockSignature(blockID ids.ID) ([bls.SignatureLen]byte, error) @@ -62,6 +76,7 @@ type backend struct { blockSignatureCache *cache.LRU[ids.ID, [bls.SignatureLen]byte] messageCache *cache.LRU[ids.ID, *avalancheWarp.UnsignedMessage] offchainAddressedCallMsgs map[ids.ID]*avalancheWarp.UnsignedMessage + messageValidators []MessageValidator } // NewBackend creates a new Backend, and initializes the signature cache and message tracking database. 
@@ -88,6 +103,10 @@ func NewBackend( return b, b.initOffChainMessages(offchainMessages) } +func (b *backend) AddMessageValidator(validator MessageValidator) { + b.messageValidators = append(b.messageValidators, validator) +} + func (b *backend) initOffChainMessages(offchainMessages [][]byte) error { for i, offchainMsg := range offchainMessages { unsignedMsg, err := avalancheWarp.ParseUnsignedMessage(offchainMsg) @@ -142,15 +161,23 @@ func (b *backend) AddMessage(unsignedMessage *avalancheWarp.UnsignedMessage) err return nil } -func (b *backend) GetMessageSignature(messageID ids.ID) ([bls.SignatureLen]byte, error) { +func (b *backend) GetMessageSignature(unsignedMessage *avalancheWarp.UnsignedMessage) ([bls.SignatureLen]byte, error) { + messageID := unsignedMessage.ID() + log.Debug("Getting warp message from backend", "messageID", messageID) if sig, ok := b.messageSignatureCache.Get(messageID); ok { return sig, nil } - unsignedMessage, err := b.GetMessage(messageID) + var err error + for _, v := range append(b.messageValidators, b) { + err := v.ValidateMessage(unsignedMessage) + if err == nil { + break + } + } if err != nil { - return [bls.SignatureLen]byte{}, fmt.Errorf("failed to get warp message %s from db: %w", messageID.String(), err) + return [bls.SignatureLen]byte{}, fmt.Errorf("failed to validate warp message: %w", err) } var signature [bls.SignatureLen]byte @@ -164,6 +191,23 @@ func (b *backend) GetMessageSignature(messageID ids.ID) ([bls.SignatureLen]byte, return signature, nil } +func (b *backend) ValidateMessage(unsignedMessage *avalancheWarp.UnsignedMessage) error { + messageID := unsignedMessage.ID() + _, err := b.GetMessage(messageID) + if err != nil { + return fmt.Errorf("failed to get warp message %s from db: %w", messageID.String(), err) + } + return nil +} + +func (b *backend) GetMessageSignatureByID(messageID ids.ID) ([bls.SignatureLen]byte, error) { + unsignedMessage, err := b.GetMessage(messageID) + if err != nil { + return [bls.SignatureLen]byte{}, fmt.Errorf("failed to get warp message %s: %w", messageID, err) + } + return b.GetMessageSignature(unsignedMessage) +} + func (b *backend) GetBlockSignature(blockID ids.ID) ([bls.SignatureLen]byte, error) { log.Debug("Getting block from backend", "blockID", blockID) if sig, ok := b.blockSignatureCache.Get(blockID); ok { diff --git a/warp/backend_test.go b/warp/backend_test.go index a262d760ef..789b73aa74 100644 --- a/warp/backend_test.go +++ b/warp/backend_test.go @@ -60,7 +60,7 @@ func TestClearDB(t *testing.T) { err = backend.AddMessage(unsignedMsg) require.NoError(t, err) // ensure that the message was added - _, err = backend.GetMessageSignature(messageID) + _, err = backend.GetMessageSignatureByID(messageID) require.NoError(t, err) } @@ -75,7 +75,7 @@ func TestClearDB(t *testing.T) { // ensure all messages have been deleted for _, messageID := range messageIDs { - _, err := backend.GetMessageSignature(messageID) + _, err := backend.GetMessageSignatureByID(messageID) require.ErrorContains(t, err, "failed to get warp message") } } @@ -95,7 +95,7 @@ func TestAddAndGetValidMessage(t *testing.T) { // Verify that a signature is returned successfully, and compare to expected signature. 
messageID := testUnsignedMessage.ID() - signature, err := backend.GetMessageSignature(messageID) + signature, err := backend.GetMessageSignatureByID(messageID) require.NoError(t, err) expectedSig, err := warpSigner.Sign(testUnsignedMessage) @@ -114,7 +114,7 @@ func TestAddAndGetUnknownMessage(t *testing.T) { // Try getting a signature for a message that was not added. messageID := testUnsignedMessage.ID() - _, err = backend.GetMessageSignature(messageID) + _, err = backend.GetMessageSignatureByID(messageID) require.Error(t, err) } @@ -163,7 +163,7 @@ func TestZeroSizedCache(t *testing.T) { // Verify that a signature is returned successfully, and compare to expected signature. messageID := testUnsignedMessage.ID() - signature, err := backend.GetMessageSignature(messageID) + signature, err := backend.GetMessageSignatureByID(messageID) require.NoError(t, err) expectedSig, err := warpSigner.Sign(testUnsignedMessage) @@ -192,7 +192,7 @@ func TestOffChainMessages(t *testing.T) { require.NoError(err) require.Equal(testUnsignedMessage.Bytes(), msg.Bytes()) - signature, err := b.GetMessageSignature(testUnsignedMessage.ID()) + signature, err := b.GetMessageSignatureByID(testUnsignedMessage.ID()) require.NoError(err) expectedSignatureBytes, err := warpSigner.Sign(msg) require.NoError(err) diff --git a/warp/handlers/signature_request.go b/warp/handlers/signature_request.go index cab7914243..25374de1ee 100644 --- a/warp/handlers/signature_request.go +++ b/warp/handlers/signature_request.go @@ -45,7 +45,7 @@ func (s *SignatureRequestHandler) OnMessageSignatureRequest(ctx context.Context, s.stats.UpdateMessageSignatureRequestTime(time.Since(startTime)) }() - signature, err := s.backend.GetMessageSignature(signatureRequest.MessageID) + signature, err := s.backend.GetMessageSignatureByID(signatureRequest.MessageID) if err != nil { log.Debug("Unknown warp signature requested", "messageID", signatureRequest.MessageID) s.stats.IncMessageSignatureMiss() diff --git a/warp/handlers/signature_request_p2p.go b/warp/handlers/signature_request_p2p.go index ecc72305af..227aaa1a31 100644 --- a/warp/handlers/signature_request_p2p.go +++ b/warp/handlers/signature_request_p2p.go @@ -28,6 +28,10 @@ const ( ErrFailedToMarshal ) +type AddressedCallHandler interface { + GetMessageSignature(*avalancheWarp.UnsignedMessage) ([bls.SignatureLen]byte, error) +} + // SignatureRequestHandlerP2P serves warp signature requests using the p2p // framework from avalanchego. It is a peer.RequestHandler for // message.MessageSignatureRequest. @@ -35,6 +39,8 @@ type SignatureRequestHandlerP2P struct { backend warp.Backend codec codec.Manager stats *handlerStats + + addressedPayloadHandlers []AddressedCallHandler } func NewSignatureRequestHandlerP2P(backend warp.Backend, codec codec.Manager) *SignatureRequestHandlerP2P { @@ -45,6 +51,10 @@ func NewSignatureRequestHandlerP2P(backend warp.Backend, codec codec.Manager) *S } } +func (s *SignatureRequestHandlerP2P) AddAddressedCallHandler(handler AddressedCallHandler) { + s.addressedPayloadHandlers = append(s.addressedPayloadHandlers, handler) +} + func (s *SignatureRequestHandlerP2P) AppRequest( ctx context.Context, nodeID ids.NodeID, @@ -79,11 +89,7 @@ func (s *SignatureRequestHandlerP2P) AppRequest( var sig [bls.SignatureLen]byte switch p := parsed.(type) { case *payload.AddressedCall: - // Note we pass the unsigned message ID to GetMessageSignature since - // that is what the backend expects. 
- // However, we verify the types and format of the payload to ensure - // the message conforms to the ACP-118 spec. - sig, err = s.GetMessageSignature(unsignedMessage.ID()) + sig, err = s.GetMessageSignature(unsignedMessage) if err != nil { s.stats.IncMessageSignatureMiss() } else { @@ -122,7 +128,7 @@ func (s *SignatureRequestHandlerP2P) AppRequest( return respBytes, nil } -func (s *SignatureRequestHandlerP2P) GetMessageSignature(messageID ids.ID) ([bls.SignatureLen]byte, error) { +func (s *SignatureRequestHandlerP2P) GetMessageSignature(message *avalancheWarp.UnsignedMessage) ([bls.SignatureLen]byte, error) { startTime := time.Now() s.stats.IncMessageSignatureRequest() @@ -131,7 +137,7 @@ func (s *SignatureRequestHandlerP2P) GetMessageSignature(messageID ids.ID) ([bls s.stats.UpdateMessageSignatureRequestTime(time.Since(startTime)) }() - return s.backend.GetMessageSignature(messageID) + return s.backend.GetMessageSignature(message) } func (s *SignatureRequestHandlerP2P) GetBlockSignature(blockID ids.ID) ([bls.SignatureLen]byte, error) { diff --git a/warp/handlers/signature_request_p2p_test.go b/warp/handlers/signature_request_p2p_test.go index 1f8f9530cb..677eb6e1f9 100644 --- a/warp/handlers/signature_request_p2p_test.go +++ b/warp/handlers/signature_request_p2p_test.go @@ -44,9 +44,9 @@ func TestMessageSignatureHandlerP2P(t *testing.T) { require.NoError(t, err) messageID := msg.ID() require.NoError(t, backend.AddMessage(msg)) - signature, err := backend.GetMessageSignature(messageID) + signature, err := backend.GetMessageSignatureByID(messageID) require.NoError(t, err) - offchainSignature, err := backend.GetMessageSignature(offchainMessage.ID()) + offchainSignature, err := backend.GetMessageSignatureByID(offchainMessage.ID()) require.NoError(t, err) unknownPayload, err := payload.NewAddressedCall([]byte{0, 0, 0}, []byte("unknown message")) diff --git a/warp/handlers/signature_request_test.go b/warp/handlers/signature_request_test.go index 172f182c96..e723a50f86 100644 --- a/warp/handlers/signature_request_test.go +++ b/warp/handlers/signature_request_test.go @@ -38,9 +38,9 @@ func TestMessageSignatureHandler(t *testing.T) { require.NoError(t, err) messageID := msg.ID() require.NoError(t, backend.AddMessage(msg)) - signature, err := backend.GetMessageSignature(messageID) + signature, err := backend.GetMessageSignatureByID(messageID) require.NoError(t, err) - offchainSignature, err := backend.GetMessageSignature(offchainMessage.ID()) + offchainSignature, err := backend.GetMessageSignatureByID(offchainMessage.ID()) require.NoError(t, err) unknownMessageID := ids.GenerateTestID() diff --git a/warp/handlers/validator_uptime_handler.go b/warp/handlers/validator_uptime_handler.go new file mode 100644 index 0000000000..8f488b07fe --- /dev/null +++ b/warp/handlers/validator_uptime_handler.go @@ -0,0 +1,38 @@ +// (c) 2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package handlers + +import ( + "errors" + "fmt" + + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/messages" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ethereum/go-ethereum/log" +) + +var errInvalidRequest = errors.New("invalid request") + +type ValidatorUptimeHandler struct{} + +func (v *ValidatorUptimeHandler) ValidateMessage(unsignedMessage *avalancheWarp.UnsignedMessage) error { + parsed, err := payload.ParseAddressedCall(unsignedMessage.Payload) + if err != nil { + return fmt.Errorf("failed to parse payload: %w", err) + } + // TODO: Does nil/empty SourceAddress matter? + if len(parsed.SourceAddress) != 0 { + return errInvalidRequest + } + + vdr, err := messages.ParseValidatorUptime(parsed.Payload) + if err != nil { + return fmt.Errorf("failed to parse validator uptime message: %w", err) + } + + log.Info("Received validator uptime message", "validationID", vdr.ValidationID, "totalUptime", vdr.TotalUptime) + log.Warn("Signing validator uptime message by default, not production behavior", "validationID", vdr.ValidationID, "totalUptime", vdr.TotalUptime) + return nil +} diff --git a/warp/handlers/validator_uptime_handler_test.go b/warp/handlers/validator_uptime_handler_test.go new file mode 100644 index 0000000000..d2a094c150 --- /dev/null +++ b/warp/handlers/validator_uptime_handler_test.go @@ -0,0 +1,35 @@ +// (c) 2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package handlers + +import ( + "testing" + + "github.com/ava-labs/avalanchego/ids" + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/messages" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/stretchr/testify/require" +) + +func TestValidatorUptimeHandler(t *testing.T) { + require := require.New(t) + + v := &ValidatorUptimeHandler{} + + validationID := ids.GenerateTestID() + totalUptime := uint64(1_000_000) // arbitrary value + vdrUptime, err := messages.NewValidatorUptime(validationID, totalUptime) + require.NoError(err) + + addressedCall, err := payload.NewAddressedCall(nil, vdrUptime.Bytes()) + require.NoError(err) + + networkID := uint32(0) + sourceChain := ids.Empty + message, err := avalancheWarp.NewUnsignedMessage(networkID, sourceChain, addressedCall.Bytes()) + require.NoError(err) + + require.NoError(v.ValidateMessage(message)) +} diff --git a/warp/service.go b/warp/service.go index 2bd310f38d..64c0200c78 100644 --- a/warp/service.go +++ b/warp/service.go @@ -51,7 +51,7 @@ func (a *API) GetMessage(ctx context.Context, messageID ids.ID) (hexutil.Bytes, // GetMessageSignature returns the BLS signature associated with a messageID. 
func (a *API) GetMessageSignature(ctx context.Context, messageID ids.ID) (hexutil.Bytes, error) { - signature, err := a.backend.GetMessageSignature(messageID) + signature, err := a.backend.GetMessageSignatureByID(messageID) if err != nil { return nil, fmt.Errorf("failed to get signature for message %s with error %w", messageID, err) } From 964e19390e356f87197c6e9ed0e3d07483f182b6 Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Tue, 6 Aug 2024 16:24:19 -0700 Subject: [PATCH 03/98] nits --- warp/backend.go | 2 +- warp/handlers/signature_request_p2p.go | 6 ------ 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/warp/backend.go b/warp/backend.go index 59b6283bd0..b747f1418b 100644 --- a/warp/backend.go +++ b/warp/backend.go @@ -46,7 +46,7 @@ type Backend interface { // messages that pass any of the validators, in addition to those known in the db. AddMessageValidator(validator MessageValidator) - // GetMessageSignatureByID returns the signature of the requested message. + // GetMessageSignature returns the signature of the requested message. GetMessageSignature(message *avalancheWarp.UnsignedMessage) ([bls.SignatureLen]byte, error) // GetMessageSignatureByID returns the signature of the requested message hash. diff --git a/warp/handlers/signature_request_p2p.go b/warp/handlers/signature_request_p2p.go index 227aaa1a31..4fede3da33 100644 --- a/warp/handlers/signature_request_p2p.go +++ b/warp/handlers/signature_request_p2p.go @@ -39,8 +39,6 @@ type SignatureRequestHandlerP2P struct { backend warp.Backend codec codec.Manager stats *handlerStats - - addressedPayloadHandlers []AddressedCallHandler } func NewSignatureRequestHandlerP2P(backend warp.Backend, codec codec.Manager) *SignatureRequestHandlerP2P { @@ -51,10 +49,6 @@ func NewSignatureRequestHandlerP2P(backend warp.Backend, codec codec.Manager) *S } } -func (s *SignatureRequestHandlerP2P) AddAddressedCallHandler(handler AddressedCallHandler) { - s.addressedPayloadHandlers = append(s.addressedPayloadHandlers, handler) -} - func (s *SignatureRequestHandlerP2P) AppRequest( ctx context.Context, nodeID ids.NodeID, From a5cde373560812c1e2fce60df2955a307dccce92 Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Tue, 6 Aug 2024 16:34:44 -0700 Subject: [PATCH 04/98] cleanup --- plugin/evm/vm_warp_test.go | 7 +++---- warp/backend.go | 12 ----------- warp/backend_test.go | 23 ++++++++------------- warp/handlers/signature_request.go | 9 +++++++- warp/handlers/signature_request_p2p_test.go | 5 ++--- warp/handlers/signature_request_test.go | 4 ++-- warp/service.go | 6 +++++- 7 files changed, 29 insertions(+), 37 deletions(-) diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index 16f9893d6f..93e1e97778 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -109,10 +109,9 @@ func TestSendWarpMessage(t *testing.T) { logData := receipts[0].Logs[0].Data unsignedMessage, err := warp.UnpackSendWarpEventDataToMessage(logData) require.NoError(err) - unsignedMessageID := unsignedMessage.ID() // Verify the signature cannot be fetched before the block is accepted - _, err = vm.warpBackend.GetMessageSignatureByID(unsignedMessageID) + _, err = vm.warpBackend.GetMessageSignature(unsignedMessage) require.Error(err) _, err = vm.warpBackend.GetBlockSignature(blk.ID()) require.Error(err) @@ -122,7 +121,7 @@ func TestSendWarpMessage(t *testing.T) { vm.blockChain.DrainAcceptorQueue() // Verify the message signature after accepting the block. 
- rawSignatureBytes, err := vm.warpBackend.GetMessageSignatureByID(unsignedMessageID) + rawSignatureBytes, err := vm.warpBackend.GetMessageSignature(unsignedMessage) require.NoError(err) blsSignature, err := bls.SignatureFromBytes(rawSignatureBytes[:]) require.NoError(err) @@ -595,7 +594,7 @@ func TestMessageSignatureRequestsToVM(t *testing.T) { // Add the known message and get its signature to confirm. err = vm.warpBackend.AddMessage(warpMessage) require.NoError(t, err) - signature, err := vm.warpBackend.GetMessageSignatureByID(warpMessage.ID()) + signature, err := vm.warpBackend.GetMessageSignature(warpMessage) require.NoError(t, err) tests := map[string]struct { diff --git a/warp/backend.go b/warp/backend.go index b747f1418b..f8558b5195 100644 --- a/warp/backend.go +++ b/warp/backend.go @@ -49,10 +49,6 @@ type Backend interface { // GetMessageSignature returns the signature of the requested message. GetMessageSignature(message *avalancheWarp.UnsignedMessage) ([bls.SignatureLen]byte, error) - // GetMessageSignatureByID returns the signature of the requested message hash. - // TODO: should we deprecate this method? - GetMessageSignatureByID(messageID ids.ID) ([bls.SignatureLen]byte, error) - // GetBlockSignature returns the signature of the requested message hash. GetBlockSignature(blockID ids.ID) ([bls.SignatureLen]byte, error) @@ -200,14 +196,6 @@ func (b *backend) ValidateMessage(unsignedMessage *avalancheWarp.UnsignedMessage return nil } -func (b *backend) GetMessageSignatureByID(messageID ids.ID) ([bls.SignatureLen]byte, error) { - unsignedMessage, err := b.GetMessage(messageID) - if err != nil { - return [bls.SignatureLen]byte{}, fmt.Errorf("failed to get warp message %s: %w", messageID, err) - } - return b.GetMessageSignature(unsignedMessage) -} - func (b *backend) GetBlockSignature(blockID ids.ID) ([bls.SignatureLen]byte, error) { log.Debug("Getting block from backend", "blockID", blockID) if sig, ok := b.blockSignatureCache.Get(blockID); ok { diff --git a/warp/backend_test.go b/warp/backend_test.go index 789b73aa74..756e5f48a4 100644 --- a/warp/backend_test.go +++ b/warp/backend_test.go @@ -10,7 +10,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" - "github.com/ava-labs/avalanchego/utils/hashing" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/subnet-evm/warp/warptest" @@ -49,18 +48,17 @@ func TestClearDB(t *testing.T) { // use multiple messages to test that all messages get cleared payloads := [][]byte{[]byte("test1"), []byte("test2"), []byte("test3"), []byte("test4"), []byte("test5")} - messageIDs := []ids.ID{} + messages := make([]*avalancheWarp.UnsignedMessage, 0, len(payloads)) // add all messages for _, payload := range payloads { unsignedMsg, err := avalancheWarp.NewUnsignedMessage(networkID, sourceChainID, payload) require.NoError(t, err) - messageID := hashing.ComputeHash256Array(unsignedMsg.Bytes()) - messageIDs = append(messageIDs, messageID) + messages = append(messages, unsignedMsg) err = backend.AddMessage(unsignedMsg) require.NoError(t, err) // ensure that the message was added - _, err = backend.GetMessageSignatureByID(messageID) + _, err = backend.GetMessageSignature(unsignedMsg) require.NoError(t, err) } @@ -74,8 +72,8 @@ func TestClearDB(t *testing.T) 
{ require.False(t, it.Next()) // ensure all messages have been deleted - for _, messageID := range messageIDs { - _, err := backend.GetMessageSignatureByID(messageID) + for _, message := range messages { + _, err := backend.GetMessageSignature(message) require.ErrorContains(t, err, "failed to get warp message") } } @@ -94,8 +92,7 @@ func TestAddAndGetValidMessage(t *testing.T) { require.NoError(t, err) // Verify that a signature is returned successfully, and compare to expected signature. - messageID := testUnsignedMessage.ID() - signature, err := backend.GetMessageSignatureByID(messageID) + signature, err := backend.GetMessageSignature(testUnsignedMessage) require.NoError(t, err) expectedSig, err := warpSigner.Sign(testUnsignedMessage) @@ -113,8 +110,7 @@ func TestAddAndGetUnknownMessage(t *testing.T) { require.NoError(t, err) // Try getting a signature for a message that was not added. - messageID := testUnsignedMessage.ID() - _, err = backend.GetMessageSignatureByID(messageID) + _, err = backend.GetMessageSignature(testUnsignedMessage) require.Error(t, err) } @@ -162,8 +158,7 @@ func TestZeroSizedCache(t *testing.T) { require.NoError(t, err) // Verify that a signature is returned successfully, and compare to expected signature. - messageID := testUnsignedMessage.ID() - signature, err := backend.GetMessageSignatureByID(messageID) + signature, err := backend.GetMessageSignature(testUnsignedMessage) require.NoError(t, err) expectedSig, err := warpSigner.Sign(testUnsignedMessage) @@ -192,7 +187,7 @@ func TestOffChainMessages(t *testing.T) { require.NoError(err) require.Equal(testUnsignedMessage.Bytes(), msg.Bytes()) - signature, err := b.GetMessageSignatureByID(testUnsignedMessage.ID()) + signature, err := b.GetMessageSignature(testUnsignedMessage) require.NoError(err) expectedSignatureBytes, err := warpSigner.Sign(msg) require.NoError(err) diff --git a/warp/handlers/signature_request.go b/warp/handlers/signature_request.go index 25374de1ee..d37f301b31 100644 --- a/warp/handlers/signature_request.go +++ b/warp/handlers/signature_request.go @@ -45,7 +45,14 @@ func (s *SignatureRequestHandler) OnMessageSignatureRequest(ctx context.Context, s.stats.UpdateMessageSignatureRequestTime(time.Since(startTime)) }() - signature, err := s.backend.GetMessageSignatureByID(signatureRequest.MessageID) + unsignedMessage, err := s.backend.GetMessage(signatureRequest.MessageID) + if err != nil { + log.Debug("Unknown warp signature requested", "messageID", signatureRequest.MessageID) + s.stats.IncMessageSignatureMiss() + return nil, nil + } + + signature, err := s.backend.GetMessageSignature(unsignedMessage) if err != nil { log.Debug("Unknown warp signature requested", "messageID", signatureRequest.MessageID) s.stats.IncMessageSignatureMiss() diff --git a/warp/handlers/signature_request_p2p_test.go b/warp/handlers/signature_request_p2p_test.go index 677eb6e1f9..3104fe59b3 100644 --- a/warp/handlers/signature_request_p2p_test.go +++ b/warp/handlers/signature_request_p2p_test.go @@ -42,11 +42,10 @@ func TestMessageSignatureHandlerP2P(t *testing.T) { require.NoError(t, err) msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, offchainPayload.Bytes()) require.NoError(t, err) - messageID := msg.ID() require.NoError(t, backend.AddMessage(msg)) - signature, err := backend.GetMessageSignatureByID(messageID) + signature, err := backend.GetMessageSignature(msg) require.NoError(t, err) - offchainSignature, err := backend.GetMessageSignatureByID(offchainMessage.ID()) + offchainSignature, err := 
backend.GetMessageSignature(offchainMessage) require.NoError(t, err) unknownPayload, err := payload.NewAddressedCall([]byte{0, 0, 0}, []byte("unknown message")) diff --git a/warp/handlers/signature_request_test.go b/warp/handlers/signature_request_test.go index e723a50f86..1f699324cc 100644 --- a/warp/handlers/signature_request_test.go +++ b/warp/handlers/signature_request_test.go @@ -38,9 +38,9 @@ func TestMessageSignatureHandler(t *testing.T) { require.NoError(t, err) messageID := msg.ID() require.NoError(t, backend.AddMessage(msg)) - signature, err := backend.GetMessageSignatureByID(messageID) + signature, err := backend.GetMessageSignature(msg) require.NoError(t, err) - offchainSignature, err := backend.GetMessageSignatureByID(offchainMessage.ID()) + offchainSignature, err := backend.GetMessageSignature(offchainMessage) require.NoError(t, err) unknownMessageID := ids.GenerateTestID() diff --git a/warp/service.go b/warp/service.go index 64c0200c78..2bff8df77e 100644 --- a/warp/service.go +++ b/warp/service.go @@ -51,7 +51,11 @@ func (a *API) GetMessage(ctx context.Context, messageID ids.ID) (hexutil.Bytes, // GetMessageSignature returns the BLS signature associated with a messageID. func (a *API) GetMessageSignature(ctx context.Context, messageID ids.ID) (hexutil.Bytes, error) { - signature, err := a.backend.GetMessageSignatureByID(messageID) + unsignedMessage, err := a.backend.GetMessage(messageID) + if err != nil { + return nil, fmt.Errorf("failed to get message %s with error %w", messageID, err) + } + signature, err := a.backend.GetMessageSignature(unsignedMessage) if err != nil { return nil, fmt.Errorf("failed to get signature for message %s with error %w", messageID, err) } From c3638868d782847a23621bce534be1026b812af1 Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Tue, 6 Aug 2024 16:43:53 -0700 Subject: [PATCH 05/98] assign to correct `err` --- warp/backend.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/warp/backend.go b/warp/backend.go index f8558b5195..2f919be257 100644 --- a/warp/backend.go +++ b/warp/backend.go @@ -167,7 +167,7 @@ func (b *backend) GetMessageSignature(unsignedMessage *avalancheWarp.UnsignedMes var err error for _, v := range append(b.messageValidators, b) { - err := v.ValidateMessage(unsignedMessage) + err = v.ValidateMessage(unsignedMessage) if err == nil { break } From 0bf1de65b2ebd21e9b74a0cc7332fe4dee04aeb6 Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Tue, 6 Aug 2024 16:53:05 -0700 Subject: [PATCH 06/98] fix handler --- warp/handlers/signature_request.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/warp/handlers/signature_request.go b/warp/handlers/signature_request.go index d37f301b31..3a28cd994e 100644 --- a/warp/handlers/signature_request.go +++ b/warp/handlers/signature_request.go @@ -45,20 +45,20 @@ func (s *SignatureRequestHandler) OnMessageSignatureRequest(ctx context.Context, s.stats.UpdateMessageSignatureRequestTime(time.Since(startTime)) }() + var signature [bls.SignatureLen]byte unsignedMessage, err := s.backend.GetMessage(signatureRequest.MessageID) if err != nil { - log.Debug("Unknown warp signature requested", "messageID", signatureRequest.MessageID) + log.Debug("Unknown warp message requested", "messageID", signatureRequest.MessageID) s.stats.IncMessageSignatureMiss() - return nil, nil - } - - signature, err := s.backend.GetMessageSignature(unsignedMessage) - if err != nil { - log.Debug("Unknown warp signature requested", "messageID", signatureRequest.MessageID) 
- s.stats.IncMessageSignatureMiss() - signature = [bls.SignatureLen]byte{} } else { - s.stats.IncMessageSignatureHit() + signature, err = s.backend.GetMessageSignature(unsignedMessage) + if err != nil { + log.Debug("Unknown warp signature requested", "messageID", signatureRequest.MessageID) + s.stats.IncMessageSignatureMiss() + signature = [bls.SignatureLen]byte{} + } else { + s.stats.IncMessageSignatureHit() + } } response := message.SignatureResponse{Signature: signature} From 8b3fb1cc89bfab9f1c341ddd84b06c661c259494 Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Mon, 19 Aug 2024 14:22:11 -0700 Subject: [PATCH 07/98] move ValidatorUptime type to subnet-evm --- warp/handlers/validator_uptime_handler.go | 2 +- .../handlers/validator_uptime_handler_test.go | 2 +- warp/messages/codec.go | 33 ++++++++++++ warp/messages/payload.go | 39 ++++++++++++++ warp/messages/validator_uptime.go | 51 +++++++++++++++++++ 5 files changed, 125 insertions(+), 2 deletions(-) create mode 100644 warp/messages/codec.go create mode 100644 warp/messages/payload.go create mode 100644 warp/messages/validator_uptime.go diff --git a/warp/handlers/validator_uptime_handler.go b/warp/handlers/validator_uptime_handler.go index 8f488b07fe..7b9e286304 100644 --- a/warp/handlers/validator_uptime_handler.go +++ b/warp/handlers/validator_uptime_handler.go @@ -8,8 +8,8 @@ import ( "fmt" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" - "github.com/ava-labs/avalanchego/vms/platformvm/warp/messages" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ava-labs/subnet-evm/warp/messages" "github.com/ethereum/go-ethereum/log" ) diff --git a/warp/handlers/validator_uptime_handler_test.go b/warp/handlers/validator_uptime_handler_test.go index d2a094c150..e0a009faad 100644 --- a/warp/handlers/validator_uptime_handler_test.go +++ b/warp/handlers/validator_uptime_handler_test.go @@ -8,8 +8,8 @@ import ( "github.com/ava-labs/avalanchego/ids" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" - "github.com/ava-labs/avalanchego/vms/platformvm/warp/messages" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ava-labs/subnet-evm/warp/messages" "github.com/stretchr/testify/require" ) diff --git a/warp/messages/codec.go b/warp/messages/codec.go new file mode 100644 index 0000000000..87d2fa334a --- /dev/null +++ b/warp/messages/codec.go @@ -0,0 +1,33 @@ +// (c) 2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package messages + +import ( + "errors" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/utils/units" +) + +const ( + CodecVersion = 0 + + MaxMessageSize = 24 * units.KiB +) + +var Codec codec.Manager + +func init() { + Codec = codec.NewManager(MaxMessageSize) + lc := linearcodec.NewDefault() + + err := errors.Join( + lc.RegisterType(&ValidatorUptime{}), + Codec.RegisterCodec(CodecVersion, lc), + ) + if err != nil { + panic(err) + } +} diff --git a/warp/messages/payload.go b/warp/messages/payload.go new file mode 100644 index 0000000000..facf54524d --- /dev/null +++ b/warp/messages/payload.go @@ -0,0 +1,39 @@ +// (c) 2024, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package messages + +import ( + "errors" + "fmt" +) + +var errWrongType = errors.New("wrong payload type") + +// Payload provides a common interface for all payloads implemented by this +// package. +type Payload interface { + // Bytes returns the binary representation of this payload. + Bytes() []byte + + // initialize the payload with the provided binary representation. + initialize(b []byte) +} + +func Parse(bytes []byte) (Payload, error) { + var payload Payload + if _, err := Codec.Unmarshal(bytes, &payload); err != nil { + return nil, err + } + payload.initialize(bytes) + return payload, nil +} + +func initialize(p Payload) error { + bytes, err := Codec.Marshal(CodecVersion, &p) + if err != nil { + return fmt.Errorf("couldn't marshal %T payload: %w", p, err) + } + p.initialize(bytes) + return nil +} diff --git a/warp/messages/validator_uptime.go b/warp/messages/validator_uptime.go new file mode 100644 index 0000000000..3d3e4dd5dd --- /dev/null +++ b/warp/messages/validator_uptime.go @@ -0,0 +1,51 @@ +// (c) 2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package messages + +import ( + "fmt" + + "github.com/ava-labs/avalanchego/ids" +) + +// ValidatorUptime is signed when the ValidationID is known and the validator +// has been up for TotalUptime seconds. +type ValidatorUptime struct { + ValidationID ids.ID `serialize:"true"` + TotalUptime uint64 `serialize:"true"` + + bytes []byte +} + +// NewValidatorUptime creates a new *ValidatorUptime and initializes it. +func NewValidatorUptime(validationID ids.ID, totalUptime uint64) (*ValidatorUptime, error) { + bhp := &ValidatorUptime{ + ValidationID: validationID, + TotalUptime: totalUptime, + } + return bhp, initialize(bhp) +} + +// ParseValidatorUptime converts a slice of bytes into an initialized ValidatorUptime. +func ParseValidatorUptime(b []byte) (*ValidatorUptime, error) { + payloadIntf, err := Parse(b) + if err != nil { + return nil, err + } + payload, ok := payloadIntf.(*ValidatorUptime) + if !ok { + return nil, fmt.Errorf("%w: %T", errWrongType, payloadIntf) + } + return payload, nil +} + +// Bytes returns the binary representation of this payload. It assumes that the +// payload is initialized from either NewValidatorUptime or Parse. 
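+// A ValidatorUptime constructed without either helper has no cached bytes, so
+// Bytes returns nil in that case.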
+func (b *ValidatorUptime) Bytes() []byte { + return b.bytes +} + +func (b *ValidatorUptime) initialize(bytes []byte) { + b.bytes = bytes +} From 008bc37f50d2df0ef2b31f1470069ce543bca8a9 Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Mon, 19 Aug 2024 14:41:12 -0700 Subject: [PATCH 08/98] disable always signing --- plugin/evm/vm.go | 1 - warp/handlers/validator_uptime_handler.go | 38 ------------------- .../handlers/validator_uptime_handler_test.go | 35 ----------------- 3 files changed, 74 deletions(-) delete mode 100644 warp/handlers/validator_uptime_handler.go delete mode 100644 warp/handlers/validator_uptime_handler_test.go diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index dd3af26df8..2c6dfd55b9 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -497,7 +497,6 @@ func (vm *VM) Initialize( if err != nil { return err } - vm.warpBackend.AddMessageValidator(&handlers.ValidatorUptimeHandler{}) // clear warpdb on initialization if config enabled if vm.config.PruneWarpDB { diff --git a/warp/handlers/validator_uptime_handler.go b/warp/handlers/validator_uptime_handler.go deleted file mode 100644 index 7b9e286304..0000000000 --- a/warp/handlers/validator_uptime_handler.go +++ /dev/null @@ -1,38 +0,0 @@ -// (c) 2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package handlers - -import ( - "errors" - "fmt" - - avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" - "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" - "github.com/ava-labs/subnet-evm/warp/messages" - "github.com/ethereum/go-ethereum/log" -) - -var errInvalidRequest = errors.New("invalid request") - -type ValidatorUptimeHandler struct{} - -func (v *ValidatorUptimeHandler) ValidateMessage(unsignedMessage *avalancheWarp.UnsignedMessage) error { - parsed, err := payload.ParseAddressedCall(unsignedMessage.Payload) - if err != nil { - return fmt.Errorf("failed to parse payload: %w", err) - } - // TODO: Does nil/empty SourceAddress matter? - if len(parsed.SourceAddress) != 0 { - return errInvalidRequest - } - - vdr, err := messages.ParseValidatorUptime(parsed.Payload) - if err != nil { - return fmt.Errorf("failed to parse validator uptime message: %w", err) - } - - log.Info("Received validator uptime message", "validationID", vdr.ValidationID, "totalUptime", vdr.TotalUptime) - log.Warn("Signing validator uptime message by default, not production behavior", "validationID", vdr.ValidationID, "totalUptime", vdr.TotalUptime) - return nil -} diff --git a/warp/handlers/validator_uptime_handler_test.go b/warp/handlers/validator_uptime_handler_test.go deleted file mode 100644 index e0a009faad..0000000000 --- a/warp/handlers/validator_uptime_handler_test.go +++ /dev/null @@ -1,35 +0,0 @@ -// (c) 2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package handlers - -import ( - "testing" - - "github.com/ava-labs/avalanchego/ids" - avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" - "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" - "github.com/ava-labs/subnet-evm/warp/messages" - "github.com/stretchr/testify/require" -) - -func TestValidatorUptimeHandler(t *testing.T) { - require := require.New(t) - - v := &ValidatorUptimeHandler{} - - validationID := ids.GenerateTestID() - totalUptime := uint64(1_000_000) // arbitrary value - vdrUptime, err := messages.NewValidatorUptime(validationID, totalUptime) - require.NoError(err) - - addressedCall, err := payload.NewAddressedCall(nil, vdrUptime.Bytes()) - require.NoError(err) - - networkID := uint32(0) - sourceChain := ids.Empty - message, err := avalancheWarp.NewUnsignedMessage(networkID, sourceChain, addressedCall.Bytes()) - require.NoError(err) - - require.NoError(v.ValidateMessage(message)) -} From 48f0ab795ec44594a9518d79a0ae11a8c2f5922e Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Tue, 20 Aug 2024 10:34:29 -0700 Subject: [PATCH 09/98] implement on the type itself --- warp/backend.go | 38 +++++++++++++++++++++++++++----------- warp/messages/payload.go | 6 ++++++ 2 files changed, 33 insertions(+), 11 deletions(-) diff --git a/warp/backend.go b/warp/backend.go index 2f919be257..867f0e929f 100644 --- a/warp/backend.go +++ b/warp/backend.go @@ -15,6 +15,7 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/bls" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ava-labs/subnet-evm/warp/messages" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) @@ -165,14 +166,7 @@ func (b *backend) GetMessageSignature(unsignedMessage *avalancheWarp.UnsignedMes return sig, nil } - var err error - for _, v := range append(b.messageValidators, b) { - err = v.ValidateMessage(unsignedMessage) - if err == nil { - break - } - } - if err != nil { + if err := b.ValidateMessage(unsignedMessage); err != nil { return [bls.SignatureLen]byte{}, fmt.Errorf("failed to validate warp message: %w", err) } @@ -188,10 +182,32 @@ func (b *backend) GetMessageSignature(unsignedMessage *avalancheWarp.UnsignedMes } func (b *backend) ValidateMessage(unsignedMessage *avalancheWarp.UnsignedMessage) error { - messageID := unsignedMessage.ID() - _, err := b.GetMessage(messageID) + // Known on-chain messages should be signed + if _, err := b.GetMessage(unsignedMessage.ID()); err == nil { + return nil + } + + // Try to parse the payload as an AddressedCall + addressedCall, err := payload.ParseAddressedCall(unsignedMessage.Payload) if err != nil { - return fmt.Errorf("failed to get warp message %s from db: %w", messageID.String(), err) + return fmt.Errorf("failed to parse unknown message as AddressedCall: %w", err) + } + + // Further, parse the payload to see if it is a known type. 
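+	// Currently the only payload type registered with the messages codec is
+	// ValidatorUptime (see warp/messages/codec.go). For example (illustrative
+	// variable names), a payload built roughly as
+	//
+	//	uptimeMsg, _ := messages.NewValidatorUptime(validationID, totalUptime)
+	//	addressedCall, _ := payload.NewAddressedCall(nil, uptimeMsg.Bytes())
+	//
+	// would parse here, but is only signed on demand if the parsed type also
+	// implements Signable.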
+ parsed, err := messages.Parse(addressedCall.Payload) + if err != nil { + return fmt.Errorf("failed to parse unknown message: %w", err) + } + + // Check if the message is a known type that can be signed on demand + signable, ok := parsed.(messages.Signable) + if !ok { + return fmt.Errorf("parsed message is not Signable: %T", signable) + } + + // Check if the message should be signed according to its type + if err := signable.VerifyMesssage(addressedCall.SourceAddress); err != nil { + return fmt.Errorf("failed to verify Signable message: %w", err) } return nil } diff --git a/warp/messages/payload.go b/warp/messages/payload.go index facf54524d..3776a1356d 100644 --- a/warp/messages/payload.go +++ b/warp/messages/payload.go @@ -20,6 +20,12 @@ type Payload interface { initialize(b []byte) } +// Signable is an optional interface that payloads can implement to allow +// on-the-fly signing of incoming messages by the warp backend. +type Signable interface { + VerifyMesssage(sourceAddress []byte) error +} + func Parse(bytes []byte) (Payload, error) { var payload Payload if _, err := Codec.Unmarshal(bytes, &payload); err != nil { From 991ff463cde8742b006e587f3667fd084abaefde Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Tue, 20 Aug 2024 10:57:24 -0700 Subject: [PATCH 10/98] remove unneeded code --- warp/backend.go | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/warp/backend.go b/warp/backend.go index 867f0e929f..360161a336 100644 --- a/warp/backend.go +++ b/warp/backend.go @@ -31,22 +31,12 @@ type BlockClient interface { GetAcceptedBlock(ctx context.Context, blockID ids.ID) (snowman.Block, error) } -type MessageValidator interface { - // If the validator returns nil, the message is considered valid and the - // backend will sign it. - ValidateMessage(*avalancheWarp.UnsignedMessage) error -} - // Backend tracks signature-eligible warp messages and provides an interface to fetch them. // The backend is also used to query for warp message signatures by the signature request handler. type Backend interface { // AddMessage signs [unsignedMessage] and adds it to the warp backend database AddMessage(unsignedMessage *avalancheWarp.UnsignedMessage) error - // AddMessageValidator adds a validator to the backend. The backend will sign - // messages that pass any of the validators, in addition to those known in the db. - AddMessageValidator(validator MessageValidator) - // GetMessageSignature returns the signature of the requested message. GetMessageSignature(message *avalancheWarp.UnsignedMessage) ([bls.SignatureLen]byte, error) @@ -73,7 +63,6 @@ type backend struct { blockSignatureCache *cache.LRU[ids.ID, [bls.SignatureLen]byte] messageCache *cache.LRU[ids.ID, *avalancheWarp.UnsignedMessage] offchainAddressedCallMsgs map[ids.ID]*avalancheWarp.UnsignedMessage - messageValidators []MessageValidator } // NewBackend creates a new Backend, and initializes the signature cache and message tracking database. 
@@ -100,10 +89,6 @@ func NewBackend( return b, b.initOffChainMessages(offchainMessages) } -func (b *backend) AddMessageValidator(validator MessageValidator) { - b.messageValidators = append(b.messageValidators, validator) -} - func (b *backend) initOffChainMessages(offchainMessages [][]byte) error { for i, offchainMsg := range offchainMessages { unsignedMsg, err := avalancheWarp.ParseUnsignedMessage(offchainMsg) From fe05d33d3b2d6ca036ff56dad7e3b920f7645233 Mon Sep 17 00:00:00 2001 From: Darioush Jalali Date: Tue, 20 Aug 2024 11:14:12 -0700 Subject: [PATCH 11/98] fix ut --- warp/backend_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/warp/backend_test.go b/warp/backend_test.go index 756e5f48a4..21013dfc24 100644 --- a/warp/backend_test.go +++ b/warp/backend_test.go @@ -74,7 +74,7 @@ func TestClearDB(t *testing.T) { // ensure all messages have been deleted for _, message := range messages { _, err := backend.GetMessageSignature(message) - require.ErrorContains(t, err, "failed to get warp message") + require.ErrorContains(t, err, "failed to validate warp message") } } From ce36343dc4191aad469f8154aad5fc917ac95802 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 16 Sep 2024 15:58:30 -0400 Subject: [PATCH 12/98] add validator state --- go.mod | 2 +- go.sum | 4 +- plugin/evm/validators/codec.go | 34 +++ plugin/evm/validators/state.go | 313 ++++++++++++++++++++++++++++ plugin/evm/validators/state_test.go | 288 +++++++++++++++++++++++++ 5 files changed, 638 insertions(+), 3 deletions(-) create mode 100644 plugin/evm/validators/codec.go create mode 100644 plugin/evm/validators/state.go create mode 100644 plugin/evm/validators/state_test.go diff --git a/go.mod b/go.mod index cc35468149..9090863110 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21.12 require ( github.com/VictoriaMetrics/fastcache v1.12.1 github.com/antithesishq/antithesis-sdk-go v0.3.8 - github.com/ava-labs/avalanchego v1.11.11 + github.com/ava-labs/avalanchego v1.11.12-0.20240910093928-8c6a486205ce github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 32359d6de1..3875c5af65 100644 --- a/go.sum +++ b/go.sum @@ -58,8 +58,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax github.com/antithesishq/antithesis-sdk-go v0.3.8 h1:OvGoHxIcOXFJLyn9IJQ5DzByZ3YVAWNBc394ObzDRb8= github.com/antithesishq/antithesis-sdk-go v0.3.8/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/avalanchego v1.11.11 h1:MIQq8xRavRj4ZXHA4G+aMiymig7SOScGOG1SApmMvBc= -github.com/ava-labs/avalanchego v1.11.11/go.mod h1:yFx3V31Jy9NFa8GZlgGnwiVf8KGjeF2+Uc99l9Scd/8= +github.com/ava-labs/avalanchego v1.11.12-0.20240910093928-8c6a486205ce h1:JXET0nfer0BA15bvtYXPrWt+Z09BrkqN6/PxuIu5uqU= +github.com/ava-labs/avalanchego v1.11.12-0.20240910093928-8c6a486205ce/go.mod h1:yFx3V31Jy9NFa8GZlgGnwiVf8KGjeF2+Uc99l9Scd/8= github.com/ava-labs/coreth v0.13.8-fixed-genesis-upgrade.0.20240815193440-a96bc921e732 h1:wlhGJbmb7s3bU2QWtxKjscGjfHknQiq+cVhhUjONsB8= github.com/ava-labs/coreth 
v0.13.8-fixed-genesis-upgrade.0.20240815193440-a96bc921e732/go.mod h1:RkQLaQ961Xe/sUb3ycn4Qi18vPPuEetTqDf2eDcquAs= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= diff --git a/plugin/evm/validators/codec.go b/plugin/evm/validators/codec.go new file mode 100644 index 0000000000..dadba8b273 --- /dev/null +++ b/plugin/evm/validators/codec.go @@ -0,0 +1,34 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package validators + +import ( + "math" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +const ( + codecVersion = uint16(0) +) + +var vdrCodec codec.Manager + +func init() { + vdrCodec = codec.NewManager(math.MaxInt32) + c := linearcodec.NewDefault() + + errs := wrappers.Errs{} + errs.Add( + c.RegisterType(validatorData{}), + + vdrCodec.RegisterCodec(codecVersion, c), + ) + + if errs.Errored() { + panic(errs.Err) + } +} diff --git a/plugin/evm/validators/state.go b/plugin/evm/validators/state.go new file mode 100644 index 0000000000..c5218fef5a --- /dev/null +++ b/plugin/evm/validators/state.go @@ -0,0 +1,313 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package validators + +import ( + "fmt" + "time" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/uptime" + "github.com/ava-labs/avalanchego/utils/set" +) + +var _ uptime.State = &state{} + +type ValidatorState interface { + uptime.State + // AddNewValidator adds a new validator to the state + AddNewValidator(vID ids.ID, nodeID ids.NodeID, startTimestamp uint64, isActive bool) error + // DeleteValidator deletes the validator from the state + DeleteValidator(vID ids.ID) error + // WriteValidatorState writes the validator state to the disk + WriteValidatorState() error + + // SetStatus sets the active status of the validator with the given vID + SetStatus(vID ids.ID, isActive bool) error + // GetStatus returns the active status of the validator with the given vID + GetStatus(vID ids.ID) (bool, error) + + // GetValidationIDs returns the validation IDs in the state + GetValidationIDs() set.Set[ids.ID] + // GetValidatorIDs returns the validator node IDs in the state + GetValidatorIDs() set.Set[ids.NodeID] + + // RegisterListener registers a listener to the state + RegisterListener(ValidatorsCallbackListener) +} + +// ValidatorsCallbackListener is a listener for the validator state +type ValidatorsCallbackListener interface { + // OnValidatorAdded is called when a new validator is added + OnValidatorAdded(vID ids.ID, nodeID ids.NodeID, startTime uint64, isActive bool) + // OnValidatorRemoved is called when a validator is removed + OnValidatorRemoved(vID ids.ID, nodeID ids.NodeID) + // OnValidatorStatusUpdated is called when a validator status is updated + OnValidatorStatusUpdated(vID ids.ID, nodeID ids.NodeID, isActive bool) +} + +type validatorData struct { + UpDuration time.Duration `serialize:"true"` + LastUpdated uint64 `serialize:"true"` + NodeID ids.NodeID `serialize:"true"` + StartTime uint64 `serialize:"true"` + IsActive bool `serialize:"true"` + + validationID ids.ID // database key + lastUpdated time.Time + 
startTime time.Time +} + +type state struct { + data map[ids.ID]*validatorData // vID -> validatorData + index map[ids.NodeID]ids.ID // nodeID -> vID + // updatedData tracks the updates since las WriteValidator was called + updatedData map[ids.ID]bool // vID -> true(updated)/false(deleted) + db database.Database + + listeners []ValidatorsCallbackListener +} + +// NewValidatorState creates a new ValidatorState, it also loads the data from the disk +func NewValidatorState(db database.Database) (ValidatorState, error) { + m := &state{ + index: make(map[ids.NodeID]ids.ID), + data: make(map[ids.ID]*validatorData), + updatedData: make(map[ids.ID]bool), + db: db, + } + if err := m.loadFromDisk(); err != nil { + return nil, fmt.Errorf("failed to load data from disk: %w", err) + } + return m, nil +} + +// GetUptime returns the uptime of the validator with the given nodeID +func (m *state) GetUptime( + nodeID ids.NodeID, +) (time.Duration, time.Time, error) { + data, err := m.getData(nodeID) + if err != nil { + return 0, time.Time{}, err + } + return data.UpDuration, data.lastUpdated, nil +} + +// SetUptime sets the uptime of the validator with the given nodeID +func (m *state) SetUptime( + nodeID ids.NodeID, + upDuration time.Duration, + lastUpdated time.Time, +) error { + data, err := m.getData(nodeID) + if err != nil { + return err + } + data.UpDuration = upDuration + data.lastUpdated = lastUpdated + + m.updatedData[data.validationID] = true + return nil +} + +// GetStartTime returns the start time of the validator with the given nodeID +func (m *state) GetStartTime(nodeID ids.NodeID) (time.Time, error) { + data, err := m.getData(nodeID) + if err != nil { + return time.Time{}, err + } + return data.startTime, nil +} + +// AddNewValidator adds a new validator to the state +// the new validator is marked as updated and will be written to the disk when WriteValidatorState is called +func (m *state) AddNewValidator(vID ids.ID, nodeID ids.NodeID, startTimestamp uint64, isActive bool) error { + startTimeUnix := time.Unix(int64(startTimestamp), 0) + + data := &validatorData{ + NodeID: nodeID, + validationID: vID, + IsActive: isActive, + StartTime: startTimestamp, + UpDuration: 0, + LastUpdated: startTimestamp, + lastUpdated: startTimeUnix, + startTime: startTimeUnix, + } + if err := m.putData(vID, data); err != nil { + return err + } + + m.updatedData[vID] = true + + for _, listener := range m.listeners { + listener.OnValidatorAdded(vID, nodeID, startTimestamp, isActive) + } + return nil +} + +// DeleteValidator marks the validator as deleted +// marked validator will be deleted from disk when WriteValidatorState is called +func (m *state) DeleteValidator(vID ids.ID) error { + data, exists := m.data[vID] + if !exists { + return database.ErrNotFound + } + delete(m.data, data.validationID) + delete(m.index, data.NodeID) + + // mark as deleted for WriteValidator + m.updatedData[data.validationID] = false + + for _, listener := range m.listeners { + listener.OnValidatorRemoved(vID, data.NodeID) + } + return nil +} + +// WriteValidatorState writes the updated state to the disk +func (m *state) WriteValidatorState() error { + // TODO: consider adding batch size + batch := m.db.NewBatch() + for vID, updated := range m.updatedData { + if updated { + data := m.data[vID] + data.LastUpdated = uint64(data.lastUpdated.Unix()) + // should never change but in case + data.StartTime = uint64(data.startTime.Unix()) + + dataBytes, err := vdrCodec.Marshal(codecVersion, data) + if err != nil { + return err + } + if err := 
batch.Put(vID[:], dataBytes); err != nil { + return err + } + } else { // deleted + if err := batch.Delete(vID[:]); err != nil { + return err + } + } + // we're done, remove the updated marker + delete(m.updatedData, vID) + } + return batch.Write() +} + +// SetStatus sets the active status of the validator with the given vID +func (m *state) SetStatus(vID ids.ID, isActive bool) error { + data, exists := m.data[vID] + if !exists { + return database.ErrNotFound + } + data.IsActive = isActive + m.updatedData[vID] = true + + for _, listener := range m.listeners { + listener.OnValidatorStatusUpdated(vID, data.NodeID, isActive) + } + return nil +} + +// GetStatus returns the active status of the validator with the given vID +func (m *state) GetStatus(vID ids.ID) (bool, error) { + data, exists := m.data[vID] + if !exists { + return false, database.ErrNotFound + } + return data.IsActive, nil +} + +// GetValidationIDs returns the validation IDs in the state +func (m *state) GetValidationIDs() set.Set[ids.ID] { + ids := set.NewSet[ids.ID](len(m.data)) + for vID := range m.data { + ids.Add(vID) + } + return ids +} + +// GetValidatorIDs returns the validator IDs in the state +func (m *state) GetValidatorIDs() set.Set[ids.NodeID] { + ids := set.NewSet[ids.NodeID](len(m.index)) + for nodeID := range m.index { + ids.Add(nodeID) + } + return ids +} + +// RegisterListener registers a listener to the state +// the listener will be notified of current validators via OnValidatorAdded +func (m *state) RegisterListener(listener ValidatorsCallbackListener) { + m.listeners = append(m.listeners, listener) + + // notify the listener of the current state + for vID, data := range m.data { + listener.OnValidatorAdded(vID, data.NodeID, uint64(data.startTime.Unix()), data.IsActive) + } +} + +// parseValidatorData parses the data from the bytes into given validatorData +func parseValidatorData(bytes []byte, data *validatorData) error { + if len(bytes) != 0 { + if _, err := vdrCodec.Unmarshal(bytes, data); err != nil { + return err + } + } + data.lastUpdated = time.Unix(int64(data.LastUpdated), 0) + data.startTime = time.Unix(int64(data.StartTime), 0) + return nil +} + +// Load the state from the disk +func (m *state) loadFromDisk() error { + it := m.db.NewIterator() + defer it.Release() + for it.Next() { + vIDBytes := it.Key() + vID, err := ids.ToID(vIDBytes) + if err != nil { + return fmt.Errorf("failed to parse validator ID: %w", err) + } + vdr := &validatorData{ + validationID: vID, + } + if err := parseValidatorData(it.Value(), vdr); err != nil { + return fmt.Errorf("failed to parse validator data: %w", err) + } + if err := m.putData(vID, vdr); err != nil { + return err + } + } + return it.Error() +} + +func (m *state) putData(vID ids.ID, data *validatorData) error { + if _, exists := m.data[vID]; exists { + return fmt.Errorf("validator data already exists for %s", vID) + } + // should never happen + if _, exists := m.index[data.NodeID]; exists { + return fmt.Errorf("validator data already exists for %s", data.NodeID) + } + + m.data[vID] = data + m.index[data.NodeID] = vID + return nil +} + +// getData returns the data for the validator with the given nodeID +// returns ErrNotFound if the data does not exist +func (m *state) getData(nodeID ids.NodeID) (*validatorData, error) { + vID, exists := m.index[nodeID] + if !exists { + return nil, database.ErrNotFound + } + data, exists := m.data[vID] + if !exists { + return nil, database.ErrNotFound + } + return data, nil +} diff --git a/plugin/evm/validators/state_test.go 
b/plugin/evm/validators/state_test.go new file mode 100644 index 0000000000..d498d3c506 --- /dev/null +++ b/plugin/evm/validators/state_test.go @@ -0,0 +1,288 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package validators + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +func TestValidatorState(t *testing.T) { + require := require.New(t) + db := memdb.New() + state, err := NewValidatorState(db) + require.NoError(err) + + // get non-existent uptime + nodeID := ids.GenerateTestNodeID() + vID := ids.GenerateTestID() + _, _, err = state.GetUptime(nodeID) + require.ErrorIs(err, database.ErrNotFound) + + // set non-existent uptime + startTime := time.Now() + err = state.SetUptime(nodeID, 1, startTime) + require.ErrorIs(err, database.ErrNotFound) + + // add new validator + state.AddNewValidator(vID, nodeID, uint64(startTime.Unix()), true) + + // adding the same validator should fail + err = state.AddNewValidator(vID, nodeID, uint64(startTime.Unix()), true) + require.Error(err) + // adding the same nodeID should fail + err = state.AddNewValidator(ids.GenerateTestID(), nodeID, uint64(startTime.Unix()), true) + require.Error(err) + + // get uptime + upDuration, lastUpdated, err := state.GetUptime(nodeID) + require.NoError(err) + require.Equal(time.Duration(0), upDuration) + require.Equal(startTime.Unix(), lastUpdated.Unix()) + + // set uptime + newUpDuration := 2 * time.Minute + newLastUpdated := lastUpdated.Add(time.Hour) + require.NoError(state.SetUptime(nodeID, newUpDuration, newLastUpdated)) + // get new uptime + upDuration, lastUpdated, err = state.GetUptime(nodeID) + require.NoError(err) + require.Equal(newUpDuration, upDuration) + require.Equal(newLastUpdated, lastUpdated) + + // set status + require.NoError(state.SetStatus(vID, false)) + // get status + status, err := state.GetStatus(vID) + require.NoError(err) + require.False(status) + + // delete uptime + state.DeleteValidator(vID) + + // get deleted uptime + _, _, err = state.GetUptime(nodeID) + require.ErrorIs(err, database.ErrNotFound) +} + +func TestWriteValidator(t *testing.T) { + require := require.New(t) + db := memdb.New() + state, err := NewValidatorState(db) + require.NoError(err) + // write empty uptimes + require.NoError(state.WriteValidatorState()) + + // load uptime + nodeID := ids.GenerateTestNodeID() + vID := ids.GenerateTestID() + startTime := time.Now() + state.AddNewValidator(vID, nodeID, uint64(startTime.Unix()), true) + + // write state, should reflect to DB + require.NoError(state.WriteValidatorState()) + require.True(db.Has(vID[:])) + + // set uptime + newUpDuration := 2 * time.Minute + newLastUpdated := startTime.Add(time.Hour) + require.NoError(state.SetUptime(nodeID, newUpDuration, newLastUpdated)) + require.NoError(state.WriteValidatorState()) + + // refresh state, should load from DB + state, err = NewValidatorState(db) + require.NoError(err) + + // get uptime + upDuration, lastUpdated, err := state.GetUptime(nodeID) + require.NoError(err) + require.Equal(newUpDuration, upDuration) + require.Equal(newLastUpdated.Unix(), lastUpdated.Unix()) + + // delete + state.DeleteValidator(vID) + 
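+	// the delete is only buffered in memory; it reaches the DB once
+	// WriteValidatorState flushes it below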
+ // write state, should reflect to DB + require.NoError(state.WriteValidatorState()) + require.False(db.Has(vID[:])) +} + +func TestParseValidator(t *testing.T) { + testNodeID, err := ids.NodeIDFromString("NodeID-CaBYJ9kzHvrQFiYWowMkJGAQKGMJqZoat") + require.NoError(t, err) + type test struct { + name string + bytes []byte + expected *validatorData + expectedErr error + } + tests := []test{ + { + name: "nil", + bytes: nil, + expected: &validatorData{ + lastUpdated: time.Unix(0, 0), + startTime: time.Unix(0, 0), + }, + expectedErr: nil, + }, + { + name: "empty", + bytes: []byte{}, + expected: &validatorData{ + lastUpdated: time.Unix(0, 0), + startTime: time.Unix(0, 0), + }, + expectedErr: nil, + }, + { + name: "valid", + bytes: []byte{ + // codec version + 0x00, 0x00, + // up duration + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5B, 0x8D, 0x80, + // last updated + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0D, 0xBB, 0xA0, + // node ID + 0x7e, 0xef, 0xe8, 0x8a, 0x45, 0xfb, 0x7a, 0xc4, + 0xb0, 0x59, 0xc9, 0x33, 0x71, 0x0a, 0x57, 0x33, + 0xff, 0x9f, 0x4b, 0xab, + // start time + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5B, 0x8D, 0x80, + // status + 0x01, + }, + expected: &validatorData{ + UpDuration: time.Duration(6000000), + LastUpdated: 900000, + lastUpdated: time.Unix(900000, 0), + NodeID: testNodeID, + StartTime: 6000000, + startTime: time.Unix(6000000, 0), + IsActive: true, + }, + }, + { + name: "invalid codec version", + bytes: []byte{ + // codec version + 0x00, 0x02, + // up duration + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5B, 0x8D, 0x80, + // last updated + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0D, 0xBB, 0xA0, + }, + expected: nil, + expectedErr: codec.ErrUnknownVersion, + }, + { + name: "short byte len", + bytes: []byte{ + // codec version + 0x00, 0x00, + // up duration + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5B, 0x8D, 0x80, + // last updated + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0D, 0xBB, 0xA0, + }, + expected: nil, + expectedErr: wrappers.ErrInsufficientLength, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + var data validatorData + err := parseValidatorData(tt.bytes, &data) + require.ErrorIs(err, tt.expectedErr) + if tt.expectedErr != nil { + return + } + require.Equal(tt.expected, &data) + }) + } +} + +func TestStateListener(t *testing.T) { + require := require.New(t) + db := memdb.New() + state, err := NewValidatorState(db) + require.NoError(err) + + expectedvID := ids.GenerateTestID() + expectedNodeID := ids.GenerateTestNodeID() + expectedStartTime := time.Now() + + // add listener + listener := &testCallbackListener{ + t: t, + onAdd: func(vID ids.ID, nodeID ids.NodeID, startTime uint64, isActive bool) { + require.Equal(expectedvID, vID) + require.Equal(expectedNodeID, nodeID) + require.Equal(uint64(expectedStartTime.Unix()), startTime) + require.True(isActive) + }, + onRemove: func(vID ids.ID, nodeID ids.NodeID) { + require.Equal(expectedvID, vID) + require.Equal(expectedNodeID, nodeID) + }, + onStatusUpdate: func(vID ids.ID, nodeID ids.NodeID, isActive bool) { + require.Equal(expectedvID, vID) + require.Equal(expectedNodeID, nodeID) + require.False(isActive) + }, + } + state.RegisterListener(listener) + + // add new validator + state.AddNewValidator(expectedvID, expectedNodeID, uint64(expectedStartTime.Unix()), true) + + // set status + require.NoError(state.SetStatus(expectedvID, false)) + + // remove validator + state.DeleteValidator(expectedvID) +} + +var _ ValidatorsCallbackListener = (*testCallbackListener)(nil) + +type testCallbackListener struct { + t *testing.T + 
onAdd func(vID ids.ID, nodeID ids.NodeID, startTime uint64, isActive bool) + onRemove func(ids.ID, ids.NodeID) + onStatusUpdate func(ids.ID, ids.NodeID, bool) +} + +func (t *testCallbackListener) OnValidatorAdded(vID ids.ID, nodeID ids.NodeID, startTime uint64, isActive bool) { + if t.onAdd != nil { + t.onAdd(vID, nodeID, startTime, isActive) + } else { + t.t.Fail() + } +} + +func (t *testCallbackListener) OnValidatorRemoved(vID ids.ID, nodeID ids.NodeID) { + if t.onRemove != nil { + t.onRemove(vID, nodeID) + } else { + t.t.Fail() + } +} + +func (t *testCallbackListener) OnValidatorStatusUpdated(vID ids.ID, nodeID ids.NodeID, isActive bool) { + if t.onStatusUpdate != nil { + t.onStatusUpdate(vID, nodeID, isActive) + } else { + t.t.Fail() + } +} From 549d3198c6b48b09fdcaec91f66a916e6e248e5f Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 16 Sep 2024 16:01:38 -0400 Subject: [PATCH 13/98] add pausable uptime manager --- plugin/evm/uptime/pausable_manager.go | 136 ++++++++++++++ plugin/evm/uptime/pausable_manager_test.go | 205 +++++++++++++++++++++ 2 files changed, 341 insertions(+) create mode 100644 plugin/evm/uptime/pausable_manager.go create mode 100644 plugin/evm/uptime/pausable_manager_test.go diff --git a/plugin/evm/uptime/pausable_manager.go b/plugin/evm/uptime/pausable_manager.go new file mode 100644 index 0000000000..51e51695dd --- /dev/null +++ b/plugin/evm/uptime/pausable_manager.go @@ -0,0 +1,136 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package uptime + +import ( + "errors" + + "github.com/ava-labs/subnet-evm/plugin/evm/validators" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/uptime" + "github.com/ava-labs/avalanchego/utils/set" +) + +var _ validators.ValidatorsCallbackListener = &pausableManager{} + +var ErrPausedDc = errors.New("paused node cannot be disconnected") + +type PausableManager interface { + uptime.Manager + validators.ValidatorsCallbackListener + IsPaused(nodeID ids.NodeID) bool +} + +type pausableManager struct { + uptime.Manager + pausedVdrs set.Set[ids.NodeID] + // connectedVdrs is a set of nodes that are connected to the manager. + // This is used to keep track of the nodes that are connected to the manager + // but are paused. 
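+	// Tracking connections separately lets resume() re-connect a paused
+	// validator to the inner manager without waiting for a new Connect call.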
+ connectedVdrs set.Set[ids.NodeID] +} + +// NewPausableManager takes an uptime.Manager and returns a PausableManager +func NewPausableManager(manager uptime.Manager) PausableManager { + return &pausableManager{ + pausedVdrs: make(set.Set[ids.NodeID]), + connectedVdrs: make(set.Set[ids.NodeID]), + Manager: manager, + } +} + +// Connect connects the node with the given ID to the uptime.Manager +// If the node is paused, it will not be connected +func (p *pausableManager) Connect(nodeID ids.NodeID) error { + p.connectedVdrs.Add(nodeID) + if !p.IsPaused(nodeID) && !p.Manager.IsConnected(nodeID) { + return p.Manager.Connect(nodeID) + } + return nil +} + +// Disconnect disconnects the node with the given ID from the uptime.Manager +// If the node is paused, it will not be disconnected +// Invariant: we should never have a connected paused node that is disconnecting +func (p *pausableManager) Disconnect(nodeID ids.NodeID) error { + p.connectedVdrs.Remove(nodeID) + if p.Manager.IsConnected(nodeID) { + if p.IsPaused(nodeID) { + // We should never see this case + return ErrPausedDc + } + return p.Manager.Disconnect(nodeID) + } + return nil +} + +// StartTracking starts tracking uptime for the nodes with the given IDs +// If a node is paused, it will not be tracked +func (p *pausableManager) StartTracking(nodeIDs []ids.NodeID) error { + var activeNodeIDs []ids.NodeID + for _, nodeID := range nodeIDs { + if !p.IsPaused(nodeID) { + activeNodeIDs = append(activeNodeIDs, nodeID) + } + } + return p.Manager.StartTracking(activeNodeIDs) +} + +// OnValidatorAdded is called when a validator is added. +// If the node is inactive, it will be paused. +func (p *pausableManager) OnValidatorAdded(vID ids.ID, nodeID ids.NodeID, startTime uint64, isActive bool) { + if !isActive { + p.pause(nodeID) + } +} + +// OnValidatorRemoved is called when a validator is removed. +// If the node is already paused, it will be resumed. +func (p *pausableManager) OnValidatorRemoved(vID ids.ID, nodeID ids.NodeID) { + if p.IsPaused(nodeID) { + p.resume(nodeID) + } +} + +// OnValidatorStatusUpdated is called when the status of a validator is updated. +// If the node is active, it will be resumed. If the node is inactive, it will be paused. +func (p *pausableManager) OnValidatorStatusUpdated(vID ids.ID, nodeID ids.NodeID, isActive bool) { + if isActive { + p.resume(nodeID) + } else { + p.pause(nodeID) + } +} + +// IsPaused returns true if the node with the given ID is paused. +func (p *pausableManager) IsPaused(nodeID ids.NodeID) bool { + return p.pausedVdrs.Contains(nodeID) +} + +// pause pauses uptime tracking for the node with the given ID +// pause can disconnect the node from the uptime.Manager if it is connected. +// Returns an error if the node is already paused. +func (p *pausableManager) pause(nodeID ids.NodeID) error { + p.pausedVdrs.Add(nodeID) + if p.Manager.IsConnected(nodeID) { + // If the node is connected, then we need to disconnect it from + // manager + // This should be fine in case tracking has not started yet since + // the inner manager should handle disconnects accordingly + return p.Manager.Disconnect(nodeID) + } + return nil +} + +// resume resumes uptime tracking for the node with the given ID +// resume can connect the node to the uptime.Manager if it was connected. +// Returns an error if the node is not paused. 
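+// In practice any error returned here comes from re-connecting the node to
+// the inner uptime.Manager.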
+func (p *pausableManager) resume(nodeID ids.NodeID) error { + p.pausedVdrs.Remove(nodeID) + if p.connectedVdrs.Contains(nodeID) { + return p.Manager.Connect(nodeID) + } + return nil +} diff --git a/plugin/evm/uptime/pausable_manager_test.go b/plugin/evm/uptime/pausable_manager_test.go new file mode 100644 index 0000000000..3d6f996134 --- /dev/null +++ b/plugin/evm/uptime/pausable_manager_test.go @@ -0,0 +1,205 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package uptime + +import ( + "testing" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/uptime" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/stretchr/testify/require" +) + +func TestPausableManager(t *testing.T) { + vID := ids.GenerateTestID() + nodeID0 := ids.GenerateTestNodeID() + startTime := time.Now() + require := require.New(t) + + // Connect before pause before tracking + { + up, clk, _ := setupTestEnv(nodeID0, startTime) + + // Connect before tracking + require.NoError(up.Connect(nodeID0)) + addTime(clk, time.Second) + + // Pause before tracking + up.OnValidatorStatusUpdated(vID, nodeID0, false) + + // Elapse Time + addTime(clk, time.Second) + + // Start tracking + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) + currentTime := addTime(clk, time.Second) + // Uptime should not have increased since the node was paused + checkUptime(t, up, nodeID0, 0*time.Second, currentTime) + + // Disconnect + require.NoError(up.Disconnect(nodeID0)) + // Uptime should not have increased + checkUptime(t, up, nodeID0, 0*time.Second, currentTime) + } + + // Paused after tracking resumed after tracking + { + up, clk, _ := setupTestEnv(nodeID0, startTime) + + // Start tracking + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) + + // Connect + addTime(clk, time.Second) + require.NoError(up.Connect(nodeID0)) + + // Pause + addTime(clk, time.Second) + up.OnValidatorStatusUpdated(vID, nodeID0, false) + + // Elapse time + currentTime := addTime(clk, 2*time.Second) + // Uptime should be 1 second since the node was paused after 1 sec + checkUptime(t, up, nodeID0, 1*time.Second, currentTime) + + // Disconnect and check uptime + currentTime = addTime(clk, 3*time.Second) + require.NoError(up.Disconnect(nodeID0)) + // Uptime should not have increased since the node was paused + checkUptime(t, up, nodeID0, 1*time.Second, currentTime) + + // Connect again and check uptime + addTime(clk, 4*time.Second) + require.NoError(up.Connect(nodeID0)) + currentTime = addTime(clk, 5*time.Second) + // Uptime should not have increased since the node was paused + checkUptime(t, up, nodeID0, 1*time.Second, currentTime) + + // Resume and check uptime + currentTime = addTime(clk, 6*time.Second) + up.OnValidatorStatusUpdated(vID, nodeID0, true) + // Uptime should not have increased since the node was paused + // and we just resumed it + checkUptime(t, up, nodeID0, 1*time.Second, currentTime) + + // Elapsed time check + currentTime = addTime(clk, 7*time.Second) + // Uptime should increase by 7 seconds above since the node was resumed + checkUptime(t, up, nodeID0, 8*time.Second, currentTime) + } + + // Paused before tracking resumed after tracking + { + up, clk, _ := setupTestEnv(nodeID0, startTime) + + // Pause before tracking + up.OnValidatorStatusUpdated(vID, nodeID0, false) + + // Start tracking + addTime(clk, time.Second) + 
require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) + + // Connect and check uptime + addTime(clk, 1*time.Second) + require.NoError(up.Connect(nodeID0)) + + currentTime := addTime(clk, 2*time.Second) + // Uptime should not have increased since the node was paused + checkUptime(t, up, nodeID0, 0*time.Second, currentTime) + + // Disconnect and check uptime + currentTime = addTime(clk, 3*time.Second) + require.NoError(up.Disconnect(nodeID0)) + // Uptime should not have increased since the node was paused + checkUptime(t, up, nodeID0, 0*time.Second, currentTime) + + // Connect again and resume + addTime(clk, 4*time.Second) + require.NoError(up.Connect(nodeID0)) + addTime(clk, 5*time.Second) + up.OnValidatorStatusUpdated(vID, nodeID0, true) + + // Check uptime after resume + currentTime = addTime(clk, 6*time.Second) + // Uptime should have increased by 6 seconds since the node was resumed + checkUptime(t, up, nodeID0, 6*time.Second, currentTime) + } + + // Paused after tracking resumed before tracking + { + up, clk, s := setupTestEnv(nodeID0, startTime) + + // Start tracking and connect + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) + addTime(clk, time.Second) + require.NoError(up.Connect(nodeID0)) + + // Pause and check uptime + currentTime := addTime(clk, 2*time.Second) + up.OnValidatorStatusUpdated(vID, nodeID0, false) + // Uptime should be 2 seconds since the node was paused after 2 seconds + checkUptime(t, up, nodeID0, 2*time.Second, currentTime) + + // Stop tracking and reinitialize manager + currentTime = addTime(clk, 3*time.Second) + require.NoError(up.StopTracking([]ids.NodeID{nodeID0})) + up = NewPausableManager(uptime.NewManager(s, clk)) + + // Uptime should not have increased since the node was paused + // and we have not started tracking again + checkUptime(t, up, nodeID0, 2*time.Second, currentTime) + + // Pause and check uptime + up.OnValidatorStatusUpdated(vID, nodeID0, false) + // Uptime should not have increased since the node was paused + checkUptime(t, up, nodeID0, 2*time.Second, currentTime) + + // Resume and check uptime + currentTime = addTime(clk, 5*time.Second) + up.OnValidatorStatusUpdated(vID, nodeID0, true) + // Uptime should have increased by 5 seconds since the node was resumed + checkUptime(t, up, nodeID0, 7*time.Second, currentTime) + + // Start tracking and check elapsed time + currentTime = addTime(clk, 6*time.Second) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) + // Uptime should have increased by 6 seconds since we started tracking + // and node was resumed (we assume the node was online until we started tracking) + checkUptime(t, up, nodeID0, 13*time.Second, currentTime) + + // Elapsed time + currentTime = addTime(clk, 7*time.Second) + // Uptime should not have increased since the node was not connected + checkUptime(t, up, nodeID0, 13*time.Second, currentTime) + + // Connect and final uptime check + require.NoError(up.Connect(nodeID0)) + currentTime = addTime(clk, 8*time.Second) + // Uptime should have increased by 8 seconds since the node was connected + checkUptime(t, up, nodeID0, 21*time.Second, currentTime) + } +} + +func setupTestEnv(nodeID ids.NodeID, startTime time.Time) (PausableManager, *mockable.Clock, uptime.State) { + clk := mockable.Clock{} + clk.Set(startTime) + s := uptime.NewTestState() + s.AddNode(nodeID, startTime) + up := NewPausableManager(uptime.NewManager(s, &clk)) + return up, &clk, s +} + +func addTime(clk *mockable.Clock, duration time.Duration) time.Time { + clk.Set(clk.Time().Add(duration)) + return 
clk.Time() +} + +func checkUptime(t *testing.T, up PausableManager, nodeID ids.NodeID, expectedUptime time.Duration, expectedLastUpdate time.Time) { + uptime, lastUpdated, err := up.CalculateUptime(nodeID) + require.NoError(t, err) + require.Equal(t, expectedLastUpdate.Unix(), lastUpdated.Unix()) + require.Equal(t, expectedUptime, uptime) +} From 9987248f3a004825be57aa0f7be7680c8283dd44 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 16 Sep 2024 16:16:03 -0400 Subject: [PATCH 14/98] remove stuttering name --- plugin/evm/validators/state.go | 126 ++++++++++++++-------------- plugin/evm/validators/state_test.go | 20 ++--- 2 files changed, 73 insertions(+), 73 deletions(-) diff --git a/plugin/evm/validators/state.go b/plugin/evm/validators/state.go index c5218fef5a..a3a410f35c 100644 --- a/plugin/evm/validators/state.go +++ b/plugin/evm/validators/state.go @@ -15,14 +15,14 @@ import ( var _ uptime.State = &state{} -type ValidatorState interface { +type State interface { uptime.State // AddNewValidator adds a new validator to the state AddNewValidator(vID ids.ID, nodeID ids.NodeID, startTimestamp uint64, isActive bool) error // DeleteValidator deletes the validator from the state DeleteValidator(vID ids.ID) error - // WriteValidatorState writes the validator state to the disk - WriteValidatorState() error + // WriteState writes the validator state to the disk + WriteState() error // SetStatus sets the active status of the validator with the given vID SetStatus(vID ids.ID, isActive bool) error @@ -35,11 +35,11 @@ type ValidatorState interface { GetValidatorIDs() set.Set[ids.NodeID] // RegisterListener registers a listener to the state - RegisterListener(ValidatorsCallbackListener) + RegisterListener(StateCallbackListener) } -// ValidatorsCallbackListener is a listener for the validator state -type ValidatorsCallbackListener interface { +// StateCallbackListener is a listener for the validator state +type StateCallbackListener interface { // OnValidatorAdded is called when a new validator is added OnValidatorAdded(vID ids.ID, nodeID ids.NodeID, startTime uint64, isActive bool) // OnValidatorRemoved is called when a validator is removed @@ -67,28 +67,28 @@ type state struct { updatedData map[ids.ID]bool // vID -> true(updated)/false(deleted) db database.Database - listeners []ValidatorsCallbackListener + listeners []StateCallbackListener } -// NewValidatorState creates a new ValidatorState, it also loads the data from the disk -func NewValidatorState(db database.Database) (ValidatorState, error) { - m := &state{ +// NewState creates a new State, it also loads the data from the disk +func NewState(db database.Database) (State, error) { + s := &state{ index: make(map[ids.NodeID]ids.ID), data: make(map[ids.ID]*validatorData), updatedData: make(map[ids.ID]bool), db: db, } - if err := m.loadFromDisk(); err != nil { + if err := s.loadFromDisk(); err != nil { return nil, fmt.Errorf("failed to load data from disk: %w", err) } - return m, nil + return s, nil } // GetUptime returns the uptime of the validator with the given nodeID -func (m *state) GetUptime( +func (s *state) GetUptime( nodeID ids.NodeID, ) (time.Duration, time.Time, error) { - data, err := m.getData(nodeID) + data, err := s.getData(nodeID) if err != nil { return 0, time.Time{}, err } @@ -96,25 +96,25 @@ func (m *state) GetUptime( } // SetUptime sets the uptime of the validator with the given nodeID -func (m *state) SetUptime( +func (s *state) SetUptime( nodeID ids.NodeID, upDuration time.Duration, lastUpdated time.Time, ) error { - 
data, err := m.getData(nodeID) + data, err := s.getData(nodeID) if err != nil { return err } data.UpDuration = upDuration data.lastUpdated = lastUpdated - m.updatedData[data.validationID] = true + s.updatedData[data.validationID] = true return nil } // GetStartTime returns the start time of the validator with the given nodeID -func (m *state) GetStartTime(nodeID ids.NodeID) (time.Time, error) { - data, err := m.getData(nodeID) +func (s *state) GetStartTime(nodeID ids.NodeID) (time.Time, error) { + data, err := s.getData(nodeID) if err != nil { return time.Time{}, err } @@ -122,8 +122,8 @@ func (m *state) GetStartTime(nodeID ids.NodeID) (time.Time, error) { } // AddNewValidator adds a new validator to the state -// the new validator is marked as updated and will be written to the disk when WriteValidatorState is called -func (m *state) AddNewValidator(vID ids.ID, nodeID ids.NodeID, startTimestamp uint64, isActive bool) error { +// the new validator is marked as updated and will be written to the disk when WriteState is called +func (s *state) AddNewValidator(vID ids.ID, nodeID ids.NodeID, startTimestamp uint64, isActive bool) error { startTimeUnix := time.Unix(int64(startTimestamp), 0) data := &validatorData{ @@ -136,44 +136,44 @@ func (m *state) AddNewValidator(vID ids.ID, nodeID ids.NodeID, startTimestamp ui lastUpdated: startTimeUnix, startTime: startTimeUnix, } - if err := m.putData(vID, data); err != nil { + if err := s.putData(vID, data); err != nil { return err } - m.updatedData[vID] = true + s.updatedData[vID] = true - for _, listener := range m.listeners { + for _, listener := range s.listeners { listener.OnValidatorAdded(vID, nodeID, startTimestamp, isActive) } return nil } // DeleteValidator marks the validator as deleted -// marked validator will be deleted from disk when WriteValidatorState is called -func (m *state) DeleteValidator(vID ids.ID) error { - data, exists := m.data[vID] +// marked validator will be deleted from disk when WriteState is called +func (s *state) DeleteValidator(vID ids.ID) error { + data, exists := s.data[vID] if !exists { return database.ErrNotFound } - delete(m.data, data.validationID) - delete(m.index, data.NodeID) + delete(s.data, data.validationID) + delete(s.index, data.NodeID) // mark as deleted for WriteValidator - m.updatedData[data.validationID] = false + s.updatedData[data.validationID] = false - for _, listener := range m.listeners { + for _, listener := range s.listeners { listener.OnValidatorRemoved(vID, data.NodeID) } return nil } -// WriteValidatorState writes the updated state to the disk -func (m *state) WriteValidatorState() error { +// WriteState writes the updated state to the disk +func (s *state) WriteState() error { // TODO: consider adding batch size - batch := m.db.NewBatch() - for vID, updated := range m.updatedData { + batch := s.db.NewBatch() + for vID, updated := range s.updatedData { if updated { - data := m.data[vID] + data := s.data[vID] data.LastUpdated = uint64(data.lastUpdated.Unix()) // should never change but in case data.StartTime = uint64(data.startTime.Unix()) @@ -191,29 +191,29 @@ func (m *state) WriteValidatorState() error { } } // we're done, remove the updated marker - delete(m.updatedData, vID) + delete(s.updatedData, vID) } return batch.Write() } // SetStatus sets the active status of the validator with the given vID -func (m *state) SetStatus(vID ids.ID, isActive bool) error { - data, exists := m.data[vID] +func (s *state) SetStatus(vID ids.ID, isActive bool) error { + data, exists := s.data[vID] if 
!exists { return database.ErrNotFound } data.IsActive = isActive - m.updatedData[vID] = true + s.updatedData[vID] = true - for _, listener := range m.listeners { + for _, listener := range s.listeners { listener.OnValidatorStatusUpdated(vID, data.NodeID, isActive) } return nil } // GetStatus returns the active status of the validator with the given vID -func (m *state) GetStatus(vID ids.ID) (bool, error) { - data, exists := m.data[vID] +func (s *state) GetStatus(vID ids.ID) (bool, error) { + data, exists := s.data[vID] if !exists { return false, database.ErrNotFound } @@ -221,18 +221,18 @@ func (m *state) GetStatus(vID ids.ID) (bool, error) { } // GetValidationIDs returns the validation IDs in the state -func (m *state) GetValidationIDs() set.Set[ids.ID] { - ids := set.NewSet[ids.ID](len(m.data)) - for vID := range m.data { +func (s *state) GetValidationIDs() set.Set[ids.ID] { + ids := set.NewSet[ids.ID](len(s.data)) + for vID := range s.data { ids.Add(vID) } return ids } // GetValidatorIDs returns the validator IDs in the state -func (m *state) GetValidatorIDs() set.Set[ids.NodeID] { - ids := set.NewSet[ids.NodeID](len(m.index)) - for nodeID := range m.index { +func (s *state) GetValidatorIDs() set.Set[ids.NodeID] { + ids := set.NewSet[ids.NodeID](len(s.index)) + for nodeID := range s.index { ids.Add(nodeID) } return ids @@ -240,11 +240,11 @@ func (m *state) GetValidatorIDs() set.Set[ids.NodeID] { // RegisterListener registers a listener to the state // the listener will be notified of current validators via OnValidatorAdded -func (m *state) RegisterListener(listener ValidatorsCallbackListener) { - m.listeners = append(m.listeners, listener) +func (s *state) RegisterListener(listener StateCallbackListener) { + s.listeners = append(s.listeners, listener) // notify the listener of the current state - for vID, data := range m.data { + for vID, data := range s.data { listener.OnValidatorAdded(vID, data.NodeID, uint64(data.startTime.Unix()), data.IsActive) } } @@ -262,8 +262,8 @@ func parseValidatorData(bytes []byte, data *validatorData) error { } // Load the state from the disk -func (m *state) loadFromDisk() error { - it := m.db.NewIterator() +func (s *state) loadFromDisk() error { + it := s.db.NewIterator() defer it.Release() for it.Next() { vIDBytes := it.Key() @@ -277,35 +277,35 @@ func (m *state) loadFromDisk() error { if err := parseValidatorData(it.Value(), vdr); err != nil { return fmt.Errorf("failed to parse validator data: %w", err) } - if err := m.putData(vID, vdr); err != nil { + if err := s.putData(vID, vdr); err != nil { return err } } return it.Error() } -func (m *state) putData(vID ids.ID, data *validatorData) error { - if _, exists := m.data[vID]; exists { +func (s *state) putData(vID ids.ID, data *validatorData) error { + if _, exists := s.data[vID]; exists { return fmt.Errorf("validator data already exists for %s", vID) } // should never happen - if _, exists := m.index[data.NodeID]; exists { + if _, exists := s.index[data.NodeID]; exists { return fmt.Errorf("validator data already exists for %s", data.NodeID) } - m.data[vID] = data - m.index[data.NodeID] = vID + s.data[vID] = data + s.index[data.NodeID] = vID return nil } // getData returns the data for the validator with the given nodeID // returns ErrNotFound if the data does not exist -func (m *state) getData(nodeID ids.NodeID) (*validatorData, error) { - vID, exists := m.index[nodeID] +func (s *state) getData(nodeID ids.NodeID) (*validatorData, error) { + vID, exists := s.index[nodeID] if !exists { return nil, 
database.ErrNotFound } - data, exists := m.data[vID] + data, exists := s.data[vID] if !exists { return nil, database.ErrNotFound } diff --git a/plugin/evm/validators/state_test.go b/plugin/evm/validators/state_test.go index d498d3c506..d4398553b3 100644 --- a/plugin/evm/validators/state_test.go +++ b/plugin/evm/validators/state_test.go @@ -16,10 +16,10 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" ) -func TestValidatorState(t *testing.T) { +func TestState(t *testing.T) { require := require.New(t) db := memdb.New() - state, err := NewValidatorState(db) + state, err := NewState(db) require.NoError(err) // get non-existent uptime @@ -77,10 +77,10 @@ func TestValidatorState(t *testing.T) { func TestWriteValidator(t *testing.T) { require := require.New(t) db := memdb.New() - state, err := NewValidatorState(db) + state, err := NewState(db) require.NoError(err) // write empty uptimes - require.NoError(state.WriteValidatorState()) + require.NoError(state.WriteState()) // load uptime nodeID := ids.GenerateTestNodeID() @@ -89,17 +89,17 @@ func TestWriteValidator(t *testing.T) { state.AddNewValidator(vID, nodeID, uint64(startTime.Unix()), true) // write state, should reflect to DB - require.NoError(state.WriteValidatorState()) + require.NoError(state.WriteState()) require.True(db.Has(vID[:])) // set uptime newUpDuration := 2 * time.Minute newLastUpdated := startTime.Add(time.Hour) require.NoError(state.SetUptime(nodeID, newUpDuration, newLastUpdated)) - require.NoError(state.WriteValidatorState()) + require.NoError(state.WriteState()) // refresh state, should load from DB - state, err = NewValidatorState(db) + state, err = NewState(db) require.NoError(err) // get uptime @@ -112,7 +112,7 @@ func TestWriteValidator(t *testing.T) { state.DeleteValidator(vID) // write state, should reflect to DB - require.NoError(state.WriteValidatorState()) + require.NoError(state.WriteState()) require.False(db.Has(vID[:])) } @@ -216,7 +216,7 @@ func TestParseValidator(t *testing.T) { func TestStateListener(t *testing.T) { require := require.New(t) db := memdb.New() - state, err := NewValidatorState(db) + state, err := NewState(db) require.NoError(err) expectedvID := ids.GenerateTestID() @@ -254,7 +254,7 @@ func TestStateListener(t *testing.T) { state.DeleteValidator(expectedvID) } -var _ ValidatorsCallbackListener = (*testCallbackListener)(nil) +var _ StateCallbackListener = (*testCallbackListener)(nil) type testCallbackListener struct { t *testing.T From 0f16af2a09a80658d3a2528855ff8c222d48e6a8 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 16 Sep 2024 16:20:58 -0400 Subject: [PATCH 15/98] rename state listener --- plugin/evm/uptime/pausable_manager.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/evm/uptime/pausable_manager.go b/plugin/evm/uptime/pausable_manager.go index 51e51695dd..841c84b45d 100644 --- a/plugin/evm/uptime/pausable_manager.go +++ b/plugin/evm/uptime/pausable_manager.go @@ -13,13 +13,13 @@ import ( "github.com/ava-labs/avalanchego/utils/set" ) -var _ validators.ValidatorsCallbackListener = &pausableManager{} +var _ validators.StateCallbackListener = &pausableManager{} var ErrPausedDc = errors.New("paused node cannot be disconnected") type PausableManager interface { uptime.Manager - validators.ValidatorsCallbackListener + validators.StateCallbackListener IsPaused(nodeID ids.NodeID) bool } From 5f64be39e7852948f2798875c41e12f407c00a54 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 16 Sep 2024 16:24:02 -0400 
Subject: [PATCH 16/98] add uptime tracking to VM --- plugin/evm/block.go | 2 +- plugin/evm/config.go | 3 + plugin/evm/mock_validator_state.go | 97 ++++++++++++ plugin/evm/service.go | 30 ++++ plugin/evm/syncervm_test.go | 2 +- plugin/evm/vm.go | 234 +++++++++++++++++++++++------ 6 files changed, 322 insertions(+), 46 deletions(-) create mode 100644 plugin/evm/mock_validator_state.go diff --git a/plugin/evm/block.go b/plugin/evm/block.go index 9d3d238d02..b150e45286 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -215,7 +215,7 @@ func (b *Block) verify(predicateContext *precompileconfig.PredicateContext, writ // If the chain is still bootstrapping, we can assume that all blocks we are verifying have // been accepted by the network (so the predicate was validated by the network when the // block was originally verified). - if b.vm.bootstrapped { + if b.vm.bootstrapped.Get() { if err := b.verifyPredicates(predicateContext); err != nil { return fmt.Errorf("failed to verify predicates: %w", err) } diff --git a/plugin/evm/config.go b/plugin/evm/config.go index 9f59775ba9..c59aeb3429 100644 --- a/plugin/evm/config.go +++ b/plugin/evm/config.go @@ -60,6 +60,7 @@ const ( // - state sync time: ~6 hrs. defaultStateSyncMinBlocks = 300_000 defaultStateSyncRequestSize = 1024 // the number of key/values to ask peers for per request + defaultUptimeAPIEnabled = true ) var ( @@ -88,6 +89,7 @@ type Config struct { // Subnet EVM APIs SnowmanAPIEnabled bool `json:"snowman-api-enabled"` + UptimeAPIEnabled bool `json:"uptime-api-enabled"` AdminAPIEnabled bool `json:"admin-api-enabled"` AdminAPIDir string `json:"admin-api-dir"` WarpAPIEnabled bool `json:"warp-api-enabled"` @@ -284,6 +286,7 @@ func (c *Config) SetDefaults() { c.StateSyncRequestSize = defaultStateSyncRequestSize c.AllowUnprotectedTxHashes = defaultAllowUnprotectedTxHashes c.AcceptedCacheSize = defaultAcceptedCacheSize + c.UptimeAPIEnabled = defaultUptimeAPIEnabled } func (d *Duration) UnmarshalJSON(data []byte) (err error) { diff --git a/plugin/evm/mock_validator_state.go b/plugin/evm/mock_validator_state.go new file mode 100644 index 0000000000..b838b8c9db --- /dev/null +++ b/plugin/evm/mock_validator_state.go @@ -0,0 +1,97 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package evm + +import ( + "context" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/crypto/bls" +) + +var ( + DefaultStartTime = uint64(time.Date(2024, time.July, 30, 0, 0, 0, 0, time.UTC).Unix()) + DefaultSetWeightNonce = uint64(0) + DefaultIsActive = true +) + +type ValidatorOutput struct { + NodeID ids.NodeID + VID ids.ID + IsActive bool + StartTime uint64 + SetWeightNonce uint64 + Weight uint64 + BLSPublicKey *bls.PublicKey +} + +type MockedValidatorState interface { + validators.State + // GetCurrentValidatorSet returns the current validator set for the provided subnet + // Returned map contains the ValidationID as the key and the ValidatorOutput as the value + GetCurrentValidatorSet(ctx context.Context, subnetID ids.ID) (map[ids.ID]*ValidatorOutput, error) +} + +type recordedValidator struct { + StartTime uint64 + SetWeightNonce uint64 + IsActive bool +} + +type MockValidatorState struct { + validators.State + recordedValidators map[ids.NodeID]recordedValidator +} + +func NewMockValidatorState(pState validators.State) MockedValidatorState { + return &MockValidatorState{ + State: pState, + recordedValidators: make(map[ids.NodeID]recordedValidator), + } +} + +func (t *MockValidatorState) RecordValidator(nodeID ids.NodeID, startTime, setWeightNonce uint64) { + t.recordedValidators[nodeID] = recordedValidator{ + StartTime: startTime, + SetWeightNonce: setWeightNonce, + IsActive: true, + } +} + +func (t *MockValidatorState) GetCurrentValidatorSet(ctx context.Context, subnetID ids.ID) (map[ids.ID]*ValidatorOutput, error) { + currentPHeight, err := t.GetCurrentHeight(ctx) + if err != nil { + return nil, err + } + validatorSet, err := t.GetValidatorSet(ctx, currentPHeight, subnetID) + if err != nil { + return nil, err + } + output := make(map[ids.ID]*ValidatorOutput, len(validatorSet)) + for key, value := range validatorSet { + startTime, isActive, setWeightNonce := DefaultStartTime, DefaultIsActive, DefaultSetWeightNonce + if recordedValidator, ok := t.recordedValidators[key]; ok { + startTime = recordedValidator.StartTime + isActive = recordedValidator.IsActive + setWeightNonce = recordedValidator.SetWeightNonce + } + // Converts the key to a validationID + // TODO: This is a temporary solution until we can use the correct ID type + validationID, err := ids.ToID(key.Bytes()) + if err != nil { + return nil, err + } + output[validationID] = &ValidatorOutput{ + NodeID: value.NodeID, + IsActive: isActive, + StartTime: startTime, + SetWeightNonce: setWeightNonce, + Weight: value.Weight, + BLSPublicKey: value.PublicKey, + } + } + return output, nil +} diff --git a/plugin/evm/service.go b/plugin/evm/service.go index a8fe61cbc0..a73571c4a9 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -6,7 +6,11 @@ package evm import ( "context" "math/big" + "time" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) @@ -36,3 +40,29 @@ func (api *SnowmanAPI) IssueBlock(ctx context.Context) error { api.vm.builder.signalTxsReady() return nil } + +type UptimeAPI struct { + ctx *snow.Context + calculator uptime.LockedCalculator +} + +// TODO: add StartTime +type GetUptimeResponse struct { + NodeID ids.NodeID 
`json:"nodeID"` + Uptime time.Duration `json:"uptime"` +} + +// GetUptime returns the uptime of the node +func (api *UptimeAPI) GetUptime(ctx context.Context, nodeID ids.NodeID) (*GetUptimeResponse, error) { + uptime, _, err := api.calculator.CalculateUptime(nodeID) + if err != nil { + return nil, err + } + + return &GetUptimeResponse{ + NodeID: nodeID, + Uptime: uptime, + }, nil +} + +// TODO: add GetUptime for currently tracked peers diff --git a/plugin/evm/syncervm_test.go b/plugin/evm/syncervm_test.go index 5a5b260a2d..ffb572b1b9 100644 --- a/plugin/evm/syncervm_test.go +++ b/plugin/evm/syncervm_test.go @@ -496,7 +496,7 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { // check we can transition to [NormalOp] state and continue to process blocks. require.NoError(syncerVM.SetState(context.Background(), snow.NormalOp)) - require.True(syncerVM.bootstrapped) + require.True(syncerVM.bootstrapped.Get()) // Generate blocks after we have entered normal consensus as well generateAndAcceptBlocks(t, syncerVM, blocksToBuild, func(_ int, gen *core.BlockGen) { diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 376a8a12c7..260fd26941 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -36,6 +36,8 @@ import ( "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/peer" "github.com/ava-labs/subnet-evm/plugin/evm/message" + "github.com/ava-labs/subnet-evm/plugin/evm/uptime" + "github.com/ava-labs/subnet-evm/plugin/evm/validators" "github.com/ava-labs/subnet-evm/triedb" "github.com/ava-labs/subnet-evm/triedb/hashdb" @@ -73,10 +75,12 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + avalancheUptime "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/chain" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" @@ -118,6 +122,9 @@ const ( txGossipThrottlingPeriod = 10 * time.Second txGossipThrottlingLimit = 2 txGossipPollSize = 1 + + // TODO: decide for a sane value for this + loadValidatorFrequency = 5 * time.Minute ) // Define the API endpoints for the VM @@ -130,11 +137,12 @@ const ( var ( // Set last accepted key to be longer than the keys used to store accepted block IDs. - lastAcceptedKey = []byte("last_accepted_key") - acceptedPrefix = []byte("snowman_accepted") - metadataPrefix = []byte("metadata") - warpPrefix = []byte("warp") - ethDBPrefix = []byte("ethdb") + lastAcceptedKey = []byte("last_accepted_key") + acceptedPrefix = []byte("snowman_accepted") + metadataPrefix = []byte("metadata") + warpPrefix = []byte("warp") + ethDBPrefix = []byte("ethdb") + validatorsDBPrefix = []byte("validators") ) var ( @@ -192,9 +200,6 @@ type VM struct { // [db] is the VM's current database managed by ChainState db *versiondb.Database - // metadataDB is used to store one off keys. 
- metadataDB database.Database - // [chaindb] is the database supplied to the Ethereum backend chaindb ethdb.Database @@ -202,10 +207,6 @@ type VM struct { // block. acceptedBlockDB database.Database - // [warpDB] is used to store warp message signatures - // set to a prefixDB with the prefix [warpPrefix] - warpDB database.Database - toEngine chan<- commonEng.Message syntacticBlockValidator BlockValidator @@ -229,7 +230,7 @@ type VM struct { // Metrics sdkMetrics *prometheus.Registry - bootstrapped bool + bootstrapped avalancheUtils.Atomic[bool] logger SubnetEVMLogger // State sync server and client @@ -245,6 +246,13 @@ type VM struct { ethTxGossipHandler p2p.Handler ethTxPushGossiper avalancheUtils.Atomic[*gossip.PushGossiper[*GossipEthTx]] ethTxPullGossiper gossip.Gossiper + + uptimeManager uptime.PausableManager + LockedCalculator avalancheUptime.LockedCalculator + + // TODO/: remove this after implementing GetCurrentValidatorSet + mockedPChainValidatorState MockedValidatorState + validatorState validators.State } // Initialize implements the snowman.ChainVM interface @@ -308,11 +316,6 @@ func (vm *VM) Initialize( vm.chaindb = rawdb.NewDatabase(Database{prefixdb.NewNested(ethDBPrefix, db)}) vm.db = versiondb.New(db) vm.acceptedBlockDB = prefixdb.New(acceptedPrefix, vm.db) - vm.metadataDB = prefixdb.New(metadataPrefix, vm.db) - // Note warpDB is not part of versiondb because it is not necessary - // that warp signatures are committed to the database atomically with - // the last accepted block. - vm.warpDB = prefixdb.New(warpPrefix, db) if vm.config.InspectDatabase { start := time.Now() @@ -481,17 +484,34 @@ func (vm *VM) Initialize( vm.Network = peer.NewNetwork(p2pNetwork, appSender, vm.networkCodec, chainCtx.NodeID, vm.config.MaxOutboundActiveRequests) vm.client = peer.NewNetworkClient(vm.Network) + vm.mockedPChainValidatorState = NewMockValidatorState(vm.ctx.ValidatorState) + validatorsDB := prefixdb.New(validatorsDBPrefix, db) + vm.validatorState, err = validators.NewState(validatorsDB) + if err != nil { + return fmt.Errorf("failed to initialize validator state: %w", err) + } + // TODO: add a configuration to disable tracking uptime + vm.uptimeManager = uptime.NewPausableManager(avalancheUptime.NewManager(vm.validatorState, &vm.clock)) + vm.LockedCalculator = avalancheUptime.NewLockedCalculator() + vm.LockedCalculator.SetCalculator(&vm.bootstrapped, &chainCtx.Lock, vm.uptimeManager) + vm.validatorState.RegisterListener(vm.uptimeManager) + // Initialize warp backend offchainWarpMessages := make([][]byte, len(vm.config.WarpOffChainMessages)) for i, hexMsg := range vm.config.WarpOffChainMessages { offchainWarpMessages[i] = []byte(hexMsg) } + + // Note warpDB is not part of versiondb because it is not necessary + // that warp signatures are committed to the database atomically with + // the last accepted block. 
+ warpDB := prefixdb.New(warpPrefix, db) vm.warpBackend, err = warp.NewBackend( vm.ctx.NetworkID, vm.ctx.ChainID, vm.ctx.WarpSigner, vm, - vm.warpDB, + warpDB, warpSignatureCacheSize, offchainWarpMessages, ) @@ -583,6 +603,7 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { } } + metadataDB := prefixdb.New(metadataPrefix, vm.db) vm.StateSyncClient = NewStateSyncClient(&stateSyncClientConfig{ chain: vm.eth, state: vm.State, @@ -601,7 +622,7 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { stateSyncRequestSize: vm.config.StateSyncRequestSize, lastAcceptedHeight: lastAcceptedHeight, // TODO clean up how this is passed around chaindb: vm.chaindb, - metadataDB: vm.metadataDB, + metadataDB: metadataDB, acceptedBlockDB: vm.acceptedBlockDB, db: vm.db, toEngine: vm.toEngine, @@ -663,38 +684,56 @@ func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error { func (vm *VM) SetState(_ context.Context, state snow.State) error { switch state { case snow.StateSyncing: - vm.bootstrapped = false + vm.bootstrapped.Set(false) return nil case snow.Bootstrapping: - vm.bootstrapped = false - if err := vm.StateSyncClient.Error(); err != nil { - return err - } - // After starting bootstrapping, do not attempt to resume a previous state sync. - if err := vm.StateSyncClient.ClearOngoingSummary(); err != nil { - return err - } - // Ensure snapshots are initialized before bootstrapping (i.e., if state sync is skipped). - // Note calling this function has no effect if snapshots are already initialized. - vm.blockChain.InitializeSnapshots() - return nil + return vm.onBootstrapStarted() case snow.NormalOp: - // Initialize goroutines related to block building once we enter normal operation as there is no need to handle mempool gossip before this point. - if err := vm.initBlockBuilding(); err != nil { - return fmt.Errorf("failed to initialize block building: %w", err) - } - vm.bootstrapped = true - return nil + return vm.onNormalOperationsStarted() default: return snow.ErrUnknownState } } -// initBlockBuilding starts goroutines to manage block building -func (vm *VM) initBlockBuilding() error { +// onBootstrapStarted marks this VM as bootstrapping +func (vm *VM) onBootstrapStarted() error { + vm.bootstrapped.Set(false) + if err := vm.StateSyncClient.Error(); err != nil { + return err + } + // After starting bootstrapping, do not attempt to resume a previous state sync. + if err := vm.StateSyncClient.ClearOngoingSummary(); err != nil { + return err + } + // Ensure snapshots are initialized before bootstrapping (i.e., if state sync is skipped). + // Note calling this function has no effect if snapshots are already initialized. + vm.blockChain.InitializeSnapshots() + + return nil +} + +// onNormalOperationsStarted marks this VM as bootstrapped +func (vm *VM) onNormalOperationsStarted() error { + if vm.bootstrapped.Get() { + return nil + } + vm.bootstrapped.Set(true) + ctx, cancel := context.WithCancel(context.TODO()) vm.cancel = cancel + // update validators first + vm.performValidatorUpdate(ctx) + vdrIDs := vm.validatorState.GetValidatorIDs().List() + // then start tracking with updated validators + if err := vm.uptimeManager.StartTracking(vdrIDs); err != nil { + return err + } + // dispatch validator set update + go vm.dispatchUpdateValidators(ctx) + + // Initialize goroutines related to block building + // once we enter normal operation as there is no need to handle mempool gossip before this point. 
ethTxGossipMarshaller := GossipEthTxMarshaller{} ethTxGossipClient := vm.Network.NewClient(p2p.TxGossipHandlerID, p2p.WithValidatorSampling(vm.validators)) ethTxGossipMetrics, err := gossip.NewMetrics(vm.sdkMetrics, ethTxGossipNamespace) @@ -703,7 +742,7 @@ func (vm *VM) initBlockBuilding() error { } ethTxPool, err := NewGossipEthTxPool(vm.txPool, vm.sdkMetrics) if err != nil { - return err + return fmt.Errorf("failed to initialize gossip eth tx pool: %w", err) } vm.shutdownWg.Add(1) go func() { @@ -761,7 +800,7 @@ func (vm *VM) initBlockBuilding() error { } if err := vm.Network.AddHandler(p2p.TxGossipHandlerID, vm.ethTxGossipHandler); err != nil { - return err + return fmt.Errorf("failed to add eth tx gossip handler: %w", err) } if vm.ethTxPullGossiper == nil { @@ -814,7 +853,7 @@ func (vm *VM) setAppRequestHandlers() { } // Shutdown implements the snowman.ChainVM interface -func (vm *VM) Shutdown(context.Context) error { +func (vm *VM) Shutdown(ctx context.Context) error { if vm.ctx == nil { return nil } @@ -829,6 +868,15 @@ func (vm *VM) Shutdown(context.Context) error { vm.eth.Stop() log.Info("Ethereum backend stop completed") vm.shutdownWg.Wait() + if vm.bootstrapped.Get() { + vdrIDs := vm.validatorState.GetValidatorIDs().List() + if err := vm.uptimeManager.StopTracking(vdrIDs); err != nil { + return fmt.Errorf("failed to stop tracking uptime: %w", err) + } + if err := vm.validatorState.WriteState(); err != nil { + return fmt.Errorf("failed to write validator: %w", err) + } + } log.Info("Subnet-EVM Shutdown completed") return nil } @@ -1031,6 +1079,13 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { enabledAPIs = append(enabledAPIs, "warp") } + if vm.config.UptimeAPIEnabled { + if err := handler.RegisterName("uptime", &UptimeAPI{vm.ctx, vm.LockedCalculator}); err != nil { + return nil, err + } + enabledAPIs = append(enabledAPIs, "uptime") + } + log.Info(fmt.Sprintf("Enabled APIs: %s", strings.Join(enabledAPIs, ", "))) apis[ethRPCEndpoint] = handler apis[ethWSEndpoint] = handler.WebsocketHandlerWithDuration( @@ -1181,3 +1236,94 @@ func attachEthService(handler *rpc.Server, apis []rpc.API, names []string) error return nil } + +func (vm *VM) Connected(ctx context.Context, nodeID ids.NodeID, version *version.Application) error { + if err := vm.uptimeManager.Connect(nodeID); err != nil { + return err + } + return vm.Network.Connected(ctx, nodeID, version) +} + +func (vm *VM) Disconnected(ctx context.Context, nodeID ids.NodeID) error { + if err := vm.uptimeManager.Disconnect(nodeID); err != nil { + return fmt.Errorf("uptime manager failed to disconnect node %s: %w", nodeID, err) + } + + return vm.Network.Disconnected(ctx, nodeID) +} + +func (vm *VM) dispatchUpdateValidators(ctx context.Context) { + ticker := time.NewTicker(loadValidatorFrequency) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + vm.ctx.Lock.Lock() + vm.performValidatorUpdate(ctx) + vm.ctx.Lock.Unlock() + case <-ctx.Done(): + return + } + } +} + +// performValidatorUpdate updates the validator state with the current validator set +// and writes the state to the database. 
+func (vm *VM) performValidatorUpdate(ctx context.Context) error { + now := time.Now() + vm.logger.Debug("performing validator update") + // get current validator set + currentValidatorSet, err := vm.mockedPChainValidatorState.GetCurrentValidatorSet(ctx, vm.ctx.SubnetID) + if err != nil { + return err + } + + // load the current validator set into the validator state + if err := vm.loadCurrentValidators(currentValidatorSet); err != nil { + return err + } + + // write validators to the database + if err := vm.validatorState.WriteState(); err != nil { + return err + } + + vm.logger.Debug("validator update complete", "duration", time.Since(now)) + return nil +} + +// TODO: cache the last updated height and then load if needed +func (vm *VM) loadCurrentValidators(vdrs map[ids.ID]*ValidatorOutput) error { + currentValidationIDs := vm.validatorState.GetValidationIDs() + // first check if we need to delete any existing validators + for vID := range currentValidationIDs { + // if the validator is not in the new set of validators + // delete the validator + if _, exists := vdrs[vID]; !exists { + vm.validatorState.DeleteValidator(vID) + } + } + + // then load the new validators + for vID, vdr := range vdrs { + if currentValidationIDs.Contains(vID) { + // Check if IsActive has changed + isActive, err := vm.validatorState.GetStatus(vID) + if err != nil { + return err + } + if isActive != vdr.IsActive { + if err := vm.validatorState.SetStatus(vID, vdr.IsActive); err != nil { + return err + } + } + } else { + err := vm.validatorState.AddNewValidator(vdr.VID, vdr.NodeID, vdr.StartTime, vdr.IsActive) + if err != nil { + return err + } + } + } + return nil +} From 6ff4954209a973300e517b95c6587f42cdfa55ae Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 16 Sep 2024 16:26:13 -0400 Subject: [PATCH 17/98] remove unused param --- plugin/evm/vm.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 260fd26941..7cc13b3209 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -853,7 +853,7 @@ func (vm *VM) setAppRequestHandlers() { } // Shutdown implements the snowman.ChainVM interface -func (vm *VM) Shutdown(ctx context.Context) error { +func (vm *VM) Shutdown(context.Context) error { if vm.ctx == nil { return nil } From 55927b2b6c54f3da8118bce788401bb61a645835 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 16 Sep 2024 16:29:27 -0400 Subject: [PATCH 18/98] add wg for update validators --- plugin/evm/vm.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 7cc13b3209..323f02d78e 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -730,7 +730,11 @@ func (vm *VM) onNormalOperationsStarted() error { return err } // dispatch validator set update - go vm.dispatchUpdateValidators(ctx) + vm.shutdownWg.Add(1) + go func() { + vm.dispatchUpdateValidators(ctx) + vm.shutdownWg.Done() + }() // Initialize goroutines related to block building // once we enter normal operation as there is no need to handle mempool gossip before this point. 
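As a rough illustration (not part of any patch in this series): the dispatchUpdateValidators goroutine added above, together with the shutdownWg bookkeeping from PATCH 18, is a standard ticker-plus-context refresh loop. The standalone sketch below shows that shape under simplified assumptions; refreshValidators and startValidatorRefresher are hypothetical stand-ins for vm.performValidatorUpdate and the VM wiring, and the vm.ctx.Lock taken around each real refresh is omitted for brevity.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// startValidatorRefresher runs refresh on a fixed interval until the context
// is cancelled. The WaitGroup lets the caller block on the goroutine during
// shutdown, mirroring how the VM waits on shutdownWg.
func startValidatorRefresher(ctx context.Context, wg *sync.WaitGroup, every time.Duration, refresh func(context.Context) error) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		ticker := time.NewTicker(every)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				if err := refresh(ctx); err != nil {
					fmt.Println("failed to update validators:", err)
				}
			case <-ctx.Done():
				return
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	var wg sync.WaitGroup
	startValidatorRefresher(ctx, &wg, 50*time.Millisecond, func(context.Context) error {
		fmt.Println("refreshing validator set")
		return nil
	})
	time.Sleep(200 * time.Millisecond) // let a few ticks fire
	cancel()                           // analogous to vm.cancel() in Shutdown
	wg.Wait()                          // analogous to vm.shutdownWg.Wait()
}
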
From bdc2cc31e3504077acc745ac5f210efe232ca873 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 16 Sep 2024 17:29:59 -0400 Subject: [PATCH 19/98] update state before network shutdown --- plugin/evm/vm.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 323f02d78e..bd8cc0a2a6 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -864,14 +864,6 @@ func (vm *VM) Shutdown(context.Context) error { if vm.cancel != nil { vm.cancel() } - vm.Network.Shutdown() - if err := vm.StateSyncClient.Shutdown(); err != nil { - log.Error("error stopping state syncer", "err", err) - } - close(vm.shutdownChan) - vm.eth.Stop() - log.Info("Ethereum backend stop completed") - vm.shutdownWg.Wait() if vm.bootstrapped.Get() { vdrIDs := vm.validatorState.GetValidatorIDs().List() if err := vm.uptimeManager.StopTracking(vdrIDs); err != nil { @@ -881,6 +873,14 @@ func (vm *VM) Shutdown(context.Context) error { return fmt.Errorf("failed to write validator: %w", err) } } + vm.Network.Shutdown() + if err := vm.StateSyncClient.Shutdown(); err != nil { + log.Error("error stopping state syncer", "err", err) + } + close(vm.shutdownChan) + vm.eth.Stop() + log.Info("Ethereum backend stop completed") + vm.shutdownWg.Wait() log.Info("Subnet-EVM Shutdown completed") return nil } From 657e542a2f3cbd232143ff66be22cab7f5a80b9d Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 16 Sep 2024 17:30:12 -0400 Subject: [PATCH 20/98] restart bootstrapping status in test --- plugin/evm/vm_upgrade_bytes_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/plugin/evm/vm_upgrade_bytes_test.go b/plugin/evm/vm_upgrade_bytes_test.go index 9907f98531..f8043e3499 100644 --- a/plugin/evm/vm_upgrade_bytes_test.go +++ b/plugin/evm/vm_upgrade_bytes_test.go @@ -33,9 +33,7 @@ import ( "github.com/stretchr/testify/require" ) -var ( - DefaultEtnaTime = uint64(upgrade.GetConfig(testNetworkID).EtnaTime.Unix()) -) +var DefaultEtnaTime = uint64(upgrade.GetConfig(testNetworkID).EtnaTime.Unix()) func TestVMUpgradeBytesPrecompile(t *testing.T) { // Make a TxAllowListConfig upgrade at genesis and convert it to JSON to apply as upgradeBytes. @@ -113,6 +111,9 @@ func TestVMUpgradeBytesPrecompile(t *testing.T) { } }() // Set the VM's state to NormalOp to initialize the tx pool. 
+ if err := vm.SetState(context.Background(), snow.Bootstrapping); err != nil { + t.Fatal(err) + } if err := vm.SetState(context.Background(), snow.NormalOp); err != nil { t.Fatal(err) } From 9eaa3f37d2bbfb7f8b32570e4c6a1dc68da2b6da Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 17 Sep 2024 15:42:43 -0400 Subject: [PATCH 21/98] add get validator to state --- plugin/evm/validators/state.go | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/plugin/evm/validators/state.go b/plugin/evm/validators/state.go index a3a410f35c..ab4a0b1175 100644 --- a/plugin/evm/validators/state.go +++ b/plugin/evm/validators/state.go @@ -33,6 +33,8 @@ type State interface { GetValidationIDs() set.Set[ids.ID] // GetValidatorIDs returns the validator node IDs in the state GetValidatorIDs() set.Set[ids.NodeID] + // GetValidator returns the validator data for the given nodeID + GetValidator(nodeID ids.NodeID) (*ValidatorOutput, error) // RegisterListener registers a listener to the state RegisterListener(StateCallbackListener) @@ -48,6 +50,13 @@ type StateCallbackListener interface { OnValidatorStatusUpdated(vID ids.ID, nodeID ids.NodeID, isActive bool) } +type ValidatorOutput struct { + ValidationID ids.ID `json:"validationID"` + NodeID ids.NodeID `json:"nodeID"` + StartTime time.Time `json:"startTime"` + IsActive bool `json:"isActive"` +} + type validatorData struct { UpDuration time.Duration `serialize:"true"` LastUpdated uint64 `serialize:"true"` @@ -238,6 +247,20 @@ func (s *state) GetValidatorIDs() set.Set[ids.NodeID] { return ids } +// GetValidator returns the validator data for the given nodeID +func (s *state) GetValidator(nodeID ids.NodeID) (*ValidatorOutput, error) { + data, err := s.getData(nodeID) + if err != nil { + return nil, err + } + return &ValidatorOutput{ + ValidationID: data.validationID, + NodeID: data.NodeID, + StartTime: data.startTime, + IsActive: data.IsActive, + }, nil +} + // RegisterListener registers a listener to the state // the listener will be notified of current validators via OnValidatorAdded func (s *state) RegisterListener(listener StateCallbackListener) { From 66747f8cd3cd47879b1473f15eff36ac268a3ad1 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 17 Sep 2024 15:42:58 -0400 Subject: [PATCH 22/98] rename uptime to validator --- plugin/evm/config.go | 14 ++++---- plugin/evm/service.go | 81 +++++++++++++++++++++++++++++++++---------- plugin/evm/vm.go | 28 ++++++++------- 3 files changed, 86 insertions(+), 37 deletions(-) diff --git a/plugin/evm/config.go b/plugin/evm/config.go index c59aeb3429..5bfddec33d 100644 --- a/plugin/evm/config.go +++ b/plugin/evm/config.go @@ -60,7 +60,7 @@ const ( // - state sync time: ~6 hrs. 
defaultStateSyncMinBlocks = 300_000 defaultStateSyncRequestSize = 1024 // the number of key/values to ask peers for per request - defaultUptimeAPIEnabled = true + defaultValidatorsAPIEnabled = true ) var ( @@ -88,11 +88,11 @@ type Config struct { AirdropFile string `json:"airdrop"` // Subnet EVM APIs - SnowmanAPIEnabled bool `json:"snowman-api-enabled"` - UptimeAPIEnabled bool `json:"uptime-api-enabled"` - AdminAPIEnabled bool `json:"admin-api-enabled"` - AdminAPIDir string `json:"admin-api-dir"` - WarpAPIEnabled bool `json:"warp-api-enabled"` + SnowmanAPIEnabled bool `json:"snowman-api-enabled"` + ValidatorsAPIEnabled bool `json:"validators-api-enabled"` + AdminAPIEnabled bool `json:"admin-api-enabled"` + AdminAPIDir string `json:"admin-api-dir"` + WarpAPIEnabled bool `json:"warp-api-enabled"` // EnabledEthAPIs is a list of Ethereum services that should be enabled // If none is specified, then we use the default list [defaultEnabledAPIs] @@ -286,7 +286,7 @@ func (c *Config) SetDefaults() { c.StateSyncRequestSize = defaultStateSyncRequestSize c.AllowUnprotectedTxHashes = defaultAllowUnprotectedTxHashes c.AcceptedCacheSize = defaultAcceptedCacheSize - c.UptimeAPIEnabled = defaultUptimeAPIEnabled + c.ValidatorsAPIEnabled = defaultValidatorsAPIEnabled } func (d *Duration) UnmarshalJSON(data []byte) (err error) { diff --git a/plugin/evm/service.go b/plugin/evm/service.go index a73571c4a9..4f1a5c72fe 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -8,9 +8,11 @@ import ( "math/big" "time" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/uptime" + "github.com/ava-labs/avalanchego/utils/json" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/subnet-evm/plugin/evm/validators" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) @@ -41,28 +43,71 @@ func (api *SnowmanAPI) IssueBlock(ctx context.Context) error { return nil } -type UptimeAPI struct { - ctx *snow.Context - calculator uptime.LockedCalculator +type ValidatorsAPI struct { + vm *VM } -// TODO: add StartTime -type GetUptimeResponse struct { - NodeID ids.NodeID `json:"nodeID"` - Uptime time.Duration `json:"uptime"` +type GetCurrentValidatorResponse struct { + ValidationID ids.ID `json:"validationID"` + NodeID ids.NodeID `json:"nodeID"` + StartTime time.Time `json:"startTime"` + IsActive bool `json:"isActive"` + IsConnected bool `json:"isConnected"` + UptimePercentage *json.Float32 `json:"uptimePercentage"` + Uptime time.Duration `json:"uptime"` } // GetUptime returns the uptime of the node -func (api *UptimeAPI) GetUptime(ctx context.Context, nodeID ids.NodeID) (*GetUptimeResponse, error) { - uptime, _, err := api.calculator.CalculateUptime(nodeID) - if err != nil { - return nil, err +func (api *ValidatorsAPI) GetCurrentValidators(ctx context.Context, nodeIDsArg *[]ids.NodeID) ([]GetCurrentValidatorResponse, error) { + api.vm.ctx.Lock.Lock() + defer api.vm.ctx.Lock.Unlock() + var nodeIDs set.Set[ids.NodeID] + if nodeIDsArg == nil || len(*nodeIDsArg) == 0 { + nodeIDs = api.vm.validatorState.GetValidatorIDs() + } else { + nodeIDs = set.Of(*nodeIDsArg...) 
} - return &GetUptimeResponse{ - NodeID: nodeID, - Uptime: uptime, - }, nil + responses := make([]GetCurrentValidatorResponse, 0, nodeIDs.Len()) + + for _, nodeID := range nodeIDs.List() { + validator, err := api.vm.validatorState.GetValidator(nodeID) + switch { + case err == database.ErrNotFound: + continue + case err != nil: + return nil, err + } + uptimePerc, err := api.getAPIUptimePerc(validator) + if err != nil { + return nil, err + } + isConnected := api.vm.uptimeManager.IsConnected(nodeID) + + uptime, _, err := api.vm.uptimeManager.CalculateUptime(nodeID) + if err != nil { + return nil, err + } + + responses = append(responses, GetCurrentValidatorResponse{ + ValidationID: validator.ValidationID, + NodeID: nodeID, + StartTime: validator.StartTime, + IsActive: validator.IsActive, + UptimePercentage: uptimePerc, + IsConnected: isConnected, + Uptime: time.Duration(uptime.Seconds()), + }) + } + return responses, nil } -// TODO: add GetUptime for currently tracked peers +func (api *ValidatorsAPI) getAPIUptimePerc(validator *validators.ValidatorOutput) (*json.Float32, error) { + rawUptime, err := api.vm.uptimeManager.CalculateUptimePercentFrom(validator.NodeID, validator.StartTime) + if err != nil { + return nil, err + } + // Transform this to a percentage (0-100) + uptime := json.Float32(rawUptime * 100) + return &uptime, nil +} diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index bd8cc0a2a6..70154344ea 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -723,11 +723,13 @@ func (vm *VM) onNormalOperationsStarted() error { vm.cancel = cancel // update validators first - vm.performValidatorUpdate(ctx) + if err := vm.performValidatorUpdate(ctx); err != nil { + return fmt.Errorf("failed to update validators: %w", err) + } vdrIDs := vm.validatorState.GetValidatorIDs().List() // then start tracking with updated validators if err := vm.uptimeManager.StartTracking(vdrIDs); err != nil { - return err + return fmt.Errorf("failed to start tracking uptime: %w", err) } // dispatch validator set update vm.shutdownWg.Add(1) @@ -1083,11 +1085,11 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { enabledAPIs = append(enabledAPIs, "warp") } - if vm.config.UptimeAPIEnabled { - if err := handler.RegisterName("uptime", &UptimeAPI{vm.ctx, vm.LockedCalculator}); err != nil { + if vm.config.ValidatorsAPIEnabled { + if err := handler.RegisterName("validators", &ValidatorsAPI{vm}); err != nil { return nil, err } - enabledAPIs = append(enabledAPIs, "uptime") + enabledAPIs = append(enabledAPIs, "validators") } log.Info(fmt.Sprintf("Enabled APIs: %s", strings.Join(enabledAPIs, ", "))) @@ -1264,7 +1266,9 @@ func (vm *VM) dispatchUpdateValidators(ctx context.Context) { select { case <-ticker.C: vm.ctx.Lock.Lock() - vm.performValidatorUpdate(ctx) + if err := vm.performValidatorUpdate(ctx); err != nil { + log.Error("failed to update validators", "error", err) + } vm.ctx.Lock.Unlock() case <-ctx.Done(): return @@ -1276,24 +1280,24 @@ func (vm *VM) dispatchUpdateValidators(ctx context.Context) { // and writes the state to the database. 
func (vm *VM) performValidatorUpdate(ctx context.Context) error { now := time.Now() - vm.logger.Debug("performing validator update") + log.Debug("performing validator update") // get current validator set currentValidatorSet, err := vm.mockedPChainValidatorState.GetCurrentValidatorSet(ctx, vm.ctx.SubnetID) if err != nil { - return err + return fmt.Errorf("failed to get current validator set: %w", err) } // load the current validator set into the validator state if err := vm.loadCurrentValidators(currentValidatorSet); err != nil { - return err + return fmt.Errorf("failed to load current validators: %w", err) } // write validators to the database if err := vm.validatorState.WriteState(); err != nil { - return err + return fmt.Errorf("failed to write validator state: %w", err) } - vm.logger.Debug("validator update complete", "duration", time.Since(now)) + log.Debug("validator update complete", "duration", time.Since(now)) return nil } @@ -1323,7 +1327,7 @@ func (vm *VM) loadCurrentValidators(vdrs map[ids.ID]*ValidatorOutput) error { } } } else { - err := vm.validatorState.AddNewValidator(vdr.VID, vdr.NodeID, vdr.StartTime, vdr.IsActive) + err := vm.validatorState.AddNewValidator(vID, vdr.NodeID, vdr.StartTime, vdr.IsActive) if err != nil { return err } From 86a68c8852291607dfdcafee3567458cee6a8db4 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 17 Sep 2024 15:43:15 -0400 Subject: [PATCH 23/98] fix mock state --- plugin/evm/mock_validator_state.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/plugin/evm/mock_validator_state.go b/plugin/evm/mock_validator_state.go index b838b8c9db..cb3088a515 100644 --- a/plugin/evm/mock_validator_state.go +++ b/plugin/evm/mock_validator_state.go @@ -78,13 +78,17 @@ func (t *MockValidatorState) GetCurrentValidatorSet(ctx context.Context, subnetI isActive = recordedValidator.IsActive setWeightNonce = recordedValidator.SetWeightNonce } - // Converts the key to a validationID + // Converts the 20 bytes nodeID to a 32-bytes validationID // TODO: This is a temporary solution until we can use the correct ID type - validationID, err := ids.ToID(key.Bytes()) + // fill bytes with 0s to make it 32 bytes + keyBytes := make([]byte, 32) + copy(keyBytes[:], key.Bytes()) + validationID, err := ids.ToID(keyBytes) if err != nil { return nil, err } output[validationID] = &ValidatorOutput{ + VID: validationID, NodeID: value.NodeID, IsActive: isActive, StartTime: startTime, From f43bae2ba76483dba3a559f49226d7a1391459b6 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 18 Sep 2024 18:04:56 -0400 Subject: [PATCH 24/98] tests --- plugin/evm/config.go | 7 + plugin/evm/mock_validator_state.go | 40 +-- plugin/evm/tx_gossip_test.go | 2 +- plugin/evm/validators/mocks.go | 76 ++++++ plugin/evm/validators/state_test.go | 21 ++ plugin/evm/vm.go | 46 ++-- plugin/evm/vm_test.go | 5 + plugin/evm/vm_validators_state_test.go | 333 +++++++++++++++++++++++++ scripts/mocks.mockgen.txt | 2 + utils/snow.go | 16 +- 10 files changed, 490 insertions(+), 58 deletions(-) create mode 100644 plugin/evm/validators/mocks.go create mode 100644 plugin/evm/vm_validators_state_test.go diff --git a/plugin/evm/config.go b/plugin/evm/config.go index 5bfddec33d..74285b72a0 100644 --- a/plugin/evm/config.go +++ b/plugin/evm/config.go @@ -61,6 +61,9 @@ const ( defaultStateSyncMinBlocks = 300_000 defaultStateSyncRequestSize = 1024 // the number of key/values to ask peers for per request defaultValidatorsAPIEnabled = true + + // TODO: decide for a sane value for this + 
defaultLoadValidatorsFrequency = 5 * time.Minute ) var ( @@ -227,6 +230,9 @@ type Config struct { // RPC settings HttpBodyLimit uint64 `json:"http-body-limit"` + + // LoadValidatorsFrequency is the frequency at which the node should load the validators + LoadValidatorsFrequency time.Duration `json:"load-validators-frequency"` } // EthAPIs returns an array of strings representing the Eth APIs that should be enabled @@ -287,6 +293,7 @@ func (c *Config) SetDefaults() { c.AllowUnprotectedTxHashes = defaultAllowUnprotectedTxHashes c.AcceptedCacheSize = defaultAcceptedCacheSize c.ValidatorsAPIEnabled = defaultValidatorsAPIEnabled + c.LoadValidatorsFrequency = defaultLoadValidatorsFrequency } func (d *Duration) UnmarshalJSON(data []byte) (err error) { diff --git a/plugin/evm/mock_validator_state.go b/plugin/evm/mock_validator_state.go index cb3088a515..bcab6dcba5 100644 --- a/plugin/evm/mock_validator_state.go +++ b/plugin/evm/mock_validator_state.go @@ -18,7 +18,7 @@ var ( DefaultIsActive = true ) -type ValidatorOutput struct { +type MockValidatorOutput struct { NodeID ids.NodeID VID ids.ID IsActive bool @@ -32,36 +32,20 @@ type MockedValidatorState interface { validators.State // GetCurrentValidatorSet returns the current validator set for the provided subnet // Returned map contains the ValidationID as the key and the ValidatorOutput as the value - GetCurrentValidatorSet(ctx context.Context, subnetID ids.ID) (map[ids.ID]*ValidatorOutput, error) -} - -type recordedValidator struct { - StartTime uint64 - SetWeightNonce uint64 - IsActive bool + GetCurrentValidatorSet(ctx context.Context, subnetID ids.ID) (map[ids.ID]*MockValidatorOutput, error) } type MockValidatorState struct { validators.State - recordedValidators map[ids.NodeID]recordedValidator } func NewMockValidatorState(pState validators.State) MockedValidatorState { return &MockValidatorState{ - State: pState, - recordedValidators: make(map[ids.NodeID]recordedValidator), + State: pState, } } -func (t *MockValidatorState) RecordValidator(nodeID ids.NodeID, startTime, setWeightNonce uint64) { - t.recordedValidators[nodeID] = recordedValidator{ - StartTime: startTime, - SetWeightNonce: setWeightNonce, - IsActive: true, - } -} - -func (t *MockValidatorState) GetCurrentValidatorSet(ctx context.Context, subnetID ids.ID) (map[ids.ID]*ValidatorOutput, error) { +func (t *MockValidatorState) GetCurrentValidatorSet(ctx context.Context, subnetID ids.ID) (map[ids.ID]*MockValidatorOutput, error) { currentPHeight, err := t.GetCurrentHeight(ctx) if err != nil { return nil, err @@ -70,14 +54,8 @@ func (t *MockValidatorState) GetCurrentValidatorSet(ctx context.Context, subnetI if err != nil { return nil, err } - output := make(map[ids.ID]*ValidatorOutput, len(validatorSet)) + output := make(map[ids.ID]*MockValidatorOutput, len(validatorSet)) for key, value := range validatorSet { - startTime, isActive, setWeightNonce := DefaultStartTime, DefaultIsActive, DefaultSetWeightNonce - if recordedValidator, ok := t.recordedValidators[key]; ok { - startTime = recordedValidator.StartTime - isActive = recordedValidator.IsActive - setWeightNonce = recordedValidator.SetWeightNonce - } // Converts the 20 bytes nodeID to a 32-bytes validationID // TODO: This is a temporary solution until we can use the correct ID type // fill bytes with 0s to make it 32 bytes @@ -87,12 +65,12 @@ func (t *MockValidatorState) GetCurrentValidatorSet(ctx context.Context, subnetI if err != nil { return nil, err } - output[validationID] = &ValidatorOutput{ + output[validationID] = 
&MockValidatorOutput{ VID: validationID, NodeID: value.NodeID, - IsActive: isActive, - StartTime: startTime, - SetWeightNonce: setWeightNonce, + IsActive: DefaultIsActive, + StartTime: DefaultStartTime, + SetWeightNonce: DefaultSetWeightNonce, Weight: value.Weight, BLSPublicKey: value.PublicKey, } diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go index 36c47ffde5..a87a7a3896 100644 --- a/plugin/evm/tx_gossip_test.go +++ b/plugin/evm/tx_gossip_test.go @@ -37,7 +37,7 @@ func TestEthTxGossip(t *testing.T) { require := require.New(t) ctx := context.Background() snowCtx := utils.TestSnowContext() - validatorState := &validatorstest.State{} + validatorState := utils.NewTestValidatorState() snowCtx.ValidatorState = validatorState responseSender := &enginetest.SenderStub{ diff --git a/plugin/evm/validators/mocks.go b/plugin/evm/validators/mocks.go new file mode 100644 index 0000000000..c54bbab3e9 --- /dev/null +++ b/plugin/evm/validators/mocks.go @@ -0,0 +1,76 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/subnet-evm/plugin/evm/validators (interfaces: StateCallbackListener) +// +// Generated by this command: +// +// mockgen -package=validators -destination=plugin/evm/validators/mocks.go github.com/ava-labs/subnet-evm/plugin/evm/validators StateCallbackListener +// + +// Package validators is a generated GoMock package. +package validators + +import ( + reflect "reflect" + + ids "github.com/ava-labs/avalanchego/ids" + gomock "go.uber.org/mock/gomock" +) + +// MockStateCallbackListener is a mock of StateCallbackListener interface. +type MockStateCallbackListener struct { + ctrl *gomock.Controller + recorder *MockStateCallbackListenerMockRecorder +} + +// MockStateCallbackListenerMockRecorder is the mock recorder for MockStateCallbackListener. +type MockStateCallbackListenerMockRecorder struct { + mock *MockStateCallbackListener +} + +// NewMockStateCallbackListener creates a new mock instance. +func NewMockStateCallbackListener(ctrl *gomock.Controller) *MockStateCallbackListener { + mock := &MockStateCallbackListener{ctrl: ctrl} + mock.recorder = &MockStateCallbackListenerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStateCallbackListener) EXPECT() *MockStateCallbackListenerMockRecorder { + return m.recorder +} + +// OnValidatorAdded mocks base method. +func (m *MockStateCallbackListener) OnValidatorAdded(arg0 ids.ID, arg1 ids.NodeID, arg2 uint64, arg3 bool) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "OnValidatorAdded", arg0, arg1, arg2, arg3) +} + +// OnValidatorAdded indicates an expected call of OnValidatorAdded. +func (mr *MockStateCallbackListenerMockRecorder) OnValidatorAdded(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnValidatorAdded", reflect.TypeOf((*MockStateCallbackListener)(nil).OnValidatorAdded), arg0, arg1, arg2, arg3) +} + +// OnValidatorRemoved mocks base method. +func (m *MockStateCallbackListener) OnValidatorRemoved(arg0 ids.ID, arg1 ids.NodeID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "OnValidatorRemoved", arg0, arg1) +} + +// OnValidatorRemoved indicates an expected call of OnValidatorRemoved. 
+func (mr *MockStateCallbackListenerMockRecorder) OnValidatorRemoved(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnValidatorRemoved", reflect.TypeOf((*MockStateCallbackListener)(nil).OnValidatorRemoved), arg0, arg1) +} + +// OnValidatorStatusUpdated mocks base method. +func (m *MockStateCallbackListener) OnValidatorStatusUpdated(arg0 ids.ID, arg1 ids.NodeID, arg2 bool) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "OnValidatorStatusUpdated", arg0, arg1, arg2) +} + +// OnValidatorStatusUpdated indicates an expected call of OnValidatorStatusUpdated. +func (mr *MockStateCallbackListenerMockRecorder) OnValidatorStatusUpdated(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnValidatorStatusUpdated", reflect.TypeOf((*MockStateCallbackListener)(nil).OnValidatorStatusUpdated), arg0, arg1, arg2) +} diff --git a/plugin/evm/validators/state_test.go b/plugin/evm/validators/state_test.go index d4398553b3..ded1cd38fa 100644 --- a/plugin/evm/validators/state_test.go +++ b/plugin/evm/validators/state_test.go @@ -252,18 +252,37 @@ func TestStateListener(t *testing.T) { // remove validator state.DeleteValidator(expectedvID) + + require.Equal(3, listener.called) + + // test case: check initial trigger when registering listener + // add new validator + state.AddNewValidator(expectedvID, expectedNodeID, uint64(expectedStartTime.Unix()), true) + newListener := &testCallbackListener{ + t: t, + onAdd: func(vID ids.ID, nodeID ids.NodeID, startTime uint64, isActive bool) { + require.Equal(expectedvID, vID) + require.Equal(expectedNodeID, nodeID) + require.Equal(uint64(expectedStartTime.Unix()), startTime) + require.True(isActive) + }, + } + state.RegisterListener(newListener) + require.Equal(1, newListener.called) } var _ StateCallbackListener = (*testCallbackListener)(nil) type testCallbackListener struct { t *testing.T + called int onAdd func(vID ids.ID, nodeID ids.NodeID, startTime uint64, isActive bool) onRemove func(ids.ID, ids.NodeID) onStatusUpdate func(ids.ID, ids.NodeID, bool) } func (t *testCallbackListener) OnValidatorAdded(vID ids.ID, nodeID ids.NodeID, startTime uint64, isActive bool) { + t.called++ if t.onAdd != nil { t.onAdd(vID, nodeID, startTime, isActive) } else { @@ -272,6 +291,7 @@ func (t *testCallbackListener) OnValidatorAdded(vID ids.ID, nodeID ids.NodeID, s } func (t *testCallbackListener) OnValidatorRemoved(vID ids.ID, nodeID ids.NodeID) { + t.called++ if t.onRemove != nil { t.onRemove(vID, nodeID) } else { @@ -280,6 +300,7 @@ func (t *testCallbackListener) OnValidatorRemoved(vID ids.ID, nodeID ids.NodeID) } func (t *testCallbackListener) OnValidatorStatusUpdated(vID ids.ID, nodeID ids.NodeID, isActive bool) { + t.called++ if t.onStatusUpdate != nil { t.onStatusUpdate(vID, nodeID, isActive) } else { diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 70154344ea..110729d0d9 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -122,9 +122,6 @@ const ( txGossipThrottlingPeriod = 10 * time.Second txGossipThrottlingLimit = 2 txGossipPollSize = 1 - - // TODO: decide for a sane value for this - loadValidatorFrequency = 5 * time.Minute ) // Define the API endpoints for the VM @@ -247,8 +244,7 @@ type VM struct { ethTxPushGossiper avalancheUtils.Atomic[*gossip.PushGossiper[*GossipEthTx]] ethTxPullGossiper gossip.Gossiper - uptimeManager uptime.PausableManager - LockedCalculator avalancheUptime.LockedCalculator + uptimeManager uptime.PausableManager // TODO/: 
remove this after implementing GetCurrentValidatorSet mockedPChainValidatorState MockedValidatorState @@ -479,6 +475,7 @@ func (vm *VM) Initialize( if err != nil { return fmt.Errorf("failed to initialize p2p network: %w", err) } + // TODO: consider using p2p validators for Subnet-EVM's validatorState vm.validators = p2p.NewValidators(p2pNetwork.Peers, vm.ctx.Log, vm.ctx.SubnetID, vm.ctx.ValidatorState, maxValidatorSetStaleness) vm.networkCodec = message.Codec vm.Network = peer.NewNetwork(p2pNetwork, appSender, vm.networkCodec, chainCtx.NodeID, vm.config.MaxOutboundActiveRequests) @@ -492,8 +489,6 @@ func (vm *VM) Initialize( } // TODO: add a configuration to disable tracking uptime vm.uptimeManager = uptime.NewPausableManager(avalancheUptime.NewManager(vm.validatorState, &vm.clock)) - vm.LockedCalculator = avalancheUptime.NewLockedCalculator() - vm.LockedCalculator.SetCalculator(&vm.bootstrapped, &chainCtx.Lock, vm.uptimeManager) vm.validatorState.RegisterListener(vm.uptimeManager) // Initialize warp backend @@ -1071,6 +1066,14 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { enabledAPIs = append(enabledAPIs, "subnet-evm-admin") } + if vm.config.ValidatorsAPIEnabled { + if err := handler.RegisterName("validators", &ValidatorsAPI{vm}); err != nil { + return nil, err + } + enabledAPIs = append(enabledAPIs, "validators") + } + + // RPC APIs if vm.config.SnowmanAPIEnabled { if err := handler.RegisterName("snowman", &SnowmanAPI{vm}); err != nil { return nil, err @@ -1085,13 +1088,6 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { enabledAPIs = append(enabledAPIs, "warp") } - if vm.config.ValidatorsAPIEnabled { - if err := handler.RegisterName("validators", &ValidatorsAPI{vm}); err != nil { - return nil, err - } - enabledAPIs = append(enabledAPIs, "validators") - } - log.Info(fmt.Sprintf("Enabled APIs: %s", strings.Join(enabledAPIs, ", "))) apis[ethRPCEndpoint] = handler apis[ethWSEndpoint] = handler.WebsocketHandlerWithDuration( @@ -1259,7 +1255,7 @@ func (vm *VM) Disconnected(ctx context.Context, nodeID ids.NodeID) error { } func (vm *VM) dispatchUpdateValidators(ctx context.Context) { - ticker := time.NewTicker(loadValidatorFrequency) + ticker := time.NewTicker(vm.config.LoadValidatorsFrequency) defer ticker.Stop() for { @@ -1288,7 +1284,7 @@ func (vm *VM) performValidatorUpdate(ctx context.Context) error { } // load the current validator set into the validator state - if err := vm.loadCurrentValidators(currentValidatorSet); err != nil { + if err := loadValidators(vm.validatorState, currentValidatorSet); err != nil { return fmt.Errorf("failed to load current validators: %w", err) } @@ -1302,33 +1298,33 @@ func (vm *VM) performValidatorUpdate(ctx context.Context) error { } // TODO: cache the last updated height and then load if needed -func (vm *VM) loadCurrentValidators(vdrs map[ids.ID]*ValidatorOutput) error { - currentValidationIDs := vm.validatorState.GetValidationIDs() +// loadValidators loads the [validators] into the validator state [validatorState] +func loadValidators(validatorState validators.State, validators map[ids.ID]*MockValidatorOutput) error { + currentValidationIDs := validatorState.GetValidationIDs() // first check if we need to delete any existing validators for vID := range currentValidationIDs { // if the validator is not in the new set of validators // delete the validator - if _, exists := vdrs[vID]; !exists { - vm.validatorState.DeleteValidator(vID) + if _, exists := validators[vID]; !exists { + 
validatorState.DeleteValidator(vID) } } // then load the new validators - for vID, vdr := range vdrs { + for vID, vdr := range validators { if currentValidationIDs.Contains(vID) { // Check if IsActive has changed - isActive, err := vm.validatorState.GetStatus(vID) + isActive, err := validatorState.GetStatus(vID) if err != nil { return err } if isActive != vdr.IsActive { - if err := vm.validatorState.SetStatus(vID, vdr.IsActive); err != nil { + if err := validatorState.SetStatus(vID, vdr.IsActive); err != nil { return err } } } else { - err := vm.validatorState.AddNewValidator(vID, vdr.NodeID, vdr.StartTime, vdr.IsActive) - if err != nil { + if err := validatorState.AddNewValidator(vID, vdr.NodeID, vdr.StartTime, vdr.IsActive); err != nil { return err } } diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 934a999684..6dbd1493ae 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -33,6 +33,7 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/enginetest" + "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/snow/validators/validatorstest" "github.com/ava-labs/avalanchego/upgrade" avalancheConstants "github.com/ava-labs/avalanchego/utils/constants" @@ -152,6 +153,10 @@ func NewContext() *snow.Context { _ = aliaser.Alias(testXChainID, "X") _ = aliaser.Alias(testXChainID, testXChainID.String()) ctx.ValidatorState = &validatorstest.State{ + GetCurrentHeightF: func(ctx context.Context) (uint64, error) { return 0, nil }, + GetValidatorSetF: func(ctx context.Context, height uint64, subnetID ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{}, nil + }, GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { subnetID, ok := map[ids.ID]ids.ID{ avalancheConstants.PlatformChainID: avalancheConstants.PrimaryNetworkID, diff --git a/plugin/evm/vm_validators_state_test.go b/plugin/evm/vm_validators_state_test.go new file mode 100644 index 0000000000..72ffcde590 --- /dev/null +++ b/plugin/evm/vm_validators_state_test.go @@ -0,0 +1,333 @@ +package evm + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + commonEng "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/enginetest" + avagoValidators "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/snow/validators/validatorstest" + "github.com/ava-labs/subnet-evm/core" + "github.com/ava-labs/subnet-evm/plugin/evm/validators" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestValidatorState(t *testing.T) { + require := require.New(t) + genesis := &core.Genesis{} + require.NoError(genesis.UnmarshalJSON([]byte(genesisJSONLatest))) + genesisJSON, err := genesis.MarshalJSON() + require.NoError(err) + + vm := &VM{} + ctx, dbManager, genesisBytes, issuer, _ := 
setupGenesis(t, string(genesisJSON)) + appSender := &enginetest.Sender{T: t} + appSender.CantSendAppGossip = true + testValidatorIDs := []ids.NodeID{ + ids.GenerateTestNodeID(), + ids.GenerateTestNodeID(), + ids.GenerateTestNodeID(), + } + ctx.ValidatorState = &validatorstest.State{ + GetCurrentHeightF: func(ctx context.Context) (uint64, error) { return 0, nil }, + GetValidatorSetF: func(ctx context.Context, height uint64, subnetID ids.ID) (map[ids.NodeID]*avagoValidators.GetValidatorOutput, error) { + return map[ids.NodeID]*avagoValidators.GetValidatorOutput{ + testValidatorIDs[0]: { + NodeID: testValidatorIDs[0], + PublicKey: nil, + Weight: 1, + }, + testValidatorIDs[1]: { + NodeID: testValidatorIDs[1], + PublicKey: nil, + Weight: 1, + }, + testValidatorIDs[2]: { + NodeID: testValidatorIDs[2], + PublicKey: nil, + Weight: 1, + }, + }, nil + }, + } + appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } + err = vm.Initialize( + context.Background(), + ctx, + dbManager, + genesisBytes, + []byte(""), + []byte(""), + issuer, + []*commonEng.Fx{}, + appSender, + ) + require.NoError(err, "error initializing GenesisVM") + + // Test case 1: state should not be populated until bootstrapped + require.Equal(0, vm.validatorState.GetValidationIDs().Len()) + _, _, err = vm.uptimeManager.CalculateUptime(testValidatorIDs[0]) + require.ErrorIs(database.ErrNotFound, err) + require.False(vm.uptimeManager.StartedTracking()) + + // Test case 2: state should be populated after bootstrapped + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) + require.Equal(3, vm.validatorState.GetValidationIDs().Len()) + _, _, err = vm.uptimeManager.CalculateUptime(testValidatorIDs[0]) + require.NoError(err) + require.True(vm.uptimeManager.StartedTracking()) + + // Test case 3: restarting VM should not lose state + vm.Shutdown(context.Background()) + // Shutdown should stop tracking + require.False(vm.uptimeManager.StartedTracking()) + + vm = &VM{} + validatorsLoadFrequency := 5 * time.Second + configJSON := fmt.Sprintf(`{"load-validators-frequency": %g}`, validatorsLoadFrequency.Seconds()) + err = vm.Initialize( + context.Background(), + NewContext(), // this context does not have validators state, making VM to source it from the database + dbManager, + genesisBytes, + []byte(""), + []byte(configJSON), + issuer, + []*commonEng.Fx{}, + appSender, + ) + require.NoError(err, "error initializing GenesisVM") + require.Equal(3, vm.validatorState.GetValidationIDs().Len()) + _, _, err = vm.uptimeManager.CalculateUptime(testValidatorIDs[0]) + require.NoError(err) + require.False(vm.uptimeManager.StartedTracking()) + + // Test case 4: new validators should be added to the state + newValidatorID := ids.GenerateTestNodeID() + testState := &validatorstest.State{ + GetCurrentHeightF: func(ctx context.Context) (uint64, error) { return 0, nil }, + GetValidatorSetF: func(ctx context.Context, height uint64, subnetID ids.ID) (map[ids.NodeID]*avagoValidators.GetValidatorOutput, error) { + return map[ids.NodeID]*avagoValidators.GetValidatorOutput{ + testValidatorIDs[0]: { + NodeID: testValidatorIDs[0], + PublicKey: nil, + Weight: 1, + }, + testValidatorIDs[1]: { + NodeID: testValidatorIDs[1], + PublicKey: nil, + Weight: 1, + }, + testValidatorIDs[2]: { + NodeID: testValidatorIDs[2], + PublicKey: nil, + Weight: 1, + }, + newValidatorID: { + NodeID: newValidatorID, + PublicKey: nil, + Weight: 1, + }, + }, nil + }, + 
} + vm.mockedPChainValidatorState = NewMockValidatorState(testState) + // set VM as bootstrapped + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) + + // new validator should be added to the state eventually after validatorsLoadFrequency + require.EventuallyWithT(func(c *assert.CollectT) { + assert.Equal(c, 4, vm.validatorState.GetValidatorIDs().Len()) + newValidator, err := vm.validatorState.GetValidator(newValidatorID) + assert.NoError(c, err) + assert.Equal(c, newValidatorID, newValidator.NodeID) + }, validatorsLoadFrequency*2, validatorsLoadFrequency/2) +} + +func TestLoadNewValidators(t *testing.T) { + testNodeIDs := []ids.NodeID{ + ids.GenerateTestNodeID(), + ids.GenerateTestNodeID(), + ids.GenerateTestNodeID(), + } + testValidationIDs := []ids.ID{ + ids.GenerateTestID(), + ids.GenerateTestID(), + ids.GenerateTestID(), + } + tests := []struct { + name string + initialValidators map[ids.ID]*MockValidatorOutput + newValidators map[ids.ID]*MockValidatorOutput + registerMockListenerCalls func(*validators.MockStateCallbackListener) + }{ + { + name: "before empty/after empty", + initialValidators: map[ids.ID]*MockValidatorOutput{}, + newValidators: map[ids.ID]*MockValidatorOutput{}, + registerMockListenerCalls: func(*validators.MockStateCallbackListener) {}, + }, + { + name: "before empty/after one", + initialValidators: map[ids.ID]*MockValidatorOutput{}, + newValidators: map[ids.ID]*MockValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + IsActive: true, + StartTime: 0, + }, + }, + registerMockListenerCalls: func(mock *validators.MockStateCallbackListener) { + mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) + }, + }, + { + name: "before one/after empty", + initialValidators: map[ids.ID]*MockValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + IsActive: true, + StartTime: 0, + }, + }, + newValidators: map[ids.ID]*MockValidatorOutput{}, + registerMockListenerCalls: func(mock *validators.MockStateCallbackListener) { + // initial validator will trigger first + mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) + // then it will be removed + mock.EXPECT().OnValidatorRemoved(testValidationIDs[0], testNodeIDs[0]).Times(1) + }, + }, + { + name: "no change", + initialValidators: map[ids.ID]*MockValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + IsActive: true, + StartTime: 0, + }, + }, + newValidators: map[ids.ID]*MockValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + IsActive: true, + StartTime: 0, + }, + }, + registerMockListenerCalls: func(mock *validators.MockStateCallbackListener) { + mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) + }, + }, + { + name: "status change and new one", + initialValidators: map[ids.ID]*MockValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + IsActive: true, + StartTime: 0, + }, + }, + newValidators: map[ids.ID]*MockValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + IsActive: false, + StartTime: 0, + }, + testValidationIDs[1]: { + NodeID: testNodeIDs[1], + IsActive: true, + StartTime: 0, + }, + }, + registerMockListenerCalls: func(mock *validators.MockStateCallbackListener) { + // initial validator will trigger first + mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) + // then it 
will be updated + mock.EXPECT().OnValidatorStatusUpdated(testValidationIDs[0], testNodeIDs[0], false).Times(1) + // new validator will be added + mock.EXPECT().OnValidatorAdded(testValidationIDs[1], testNodeIDs[1], uint64(0), true).Times(1) + }, + }, + { + name: "renew validation ID", + initialValidators: map[ids.ID]*MockValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + IsActive: true, + StartTime: 0, + }, + }, + newValidators: map[ids.ID]*MockValidatorOutput{ + testValidationIDs[1]: { + NodeID: testNodeIDs[0], + IsActive: true, + StartTime: 0, + }, + }, + registerMockListenerCalls: func(mock *validators.MockStateCallbackListener) { + // initial validator will trigger first + mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) + // then it will be removed + mock.EXPECT().OnValidatorRemoved(testValidationIDs[0], testNodeIDs[0]).Times(1) + // new validator will be added + mock.EXPECT().OnValidatorAdded(testValidationIDs[1], testNodeIDs[0], uint64(0), true).Times(1) + }, + }, + { + name: "renew node ID", + initialValidators: map[ids.ID]*MockValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + IsActive: true, + StartTime: 0, + }, + }, + newValidators: map[ids.ID]*MockValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[1], + IsActive: true, + StartTime: 0, + }, + }, + registerMockListenerCalls: func(mock *validators.MockStateCallbackListener) { + // initial validator will trigger first + mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) + // then it won't be called since we don't track the node ID changes + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + require := require.New(tt) + db := memdb.New() + validatorState, err := validators.NewState(db) + require.NoError(err) + + // set initial validators + for vID, validator := range test.initialValidators { + err := validatorState.AddNewValidator(vID, validator.NodeID, validator.StartTime, validator.IsActive) + require.NoError(err) + } + // enable mock listener + ctrl := gomock.NewController(tt) + mockListener := validators.NewMockStateCallbackListener(ctrl) + test.registerMockListenerCalls(mockListener) + + validatorState.RegisterListener(mockListener) + require.NoError(loadValidators(validatorState, test.newValidators)) + ctrl.Finish() + }) + } +} diff --git a/scripts/mocks.mockgen.txt b/scripts/mocks.mockgen.txt index 391dc8e13c..3fef0f4a3a 100644 --- a/scripts/mocks.mockgen.txt +++ b/scripts/mocks.mockgen.txt @@ -1,2 +1,4 @@ github.com/ava-labs/subnet-evm/precompile/precompileconfig=Predicater,Config,ChainConfig,Accepter=precompile/precompileconfig/mocks.go github.com/ava-labs/subnet-evm/precompile/contract=BlockContext,AccessibleState,StateDB=precompile/contract/mocks.go +github.com/ava-labs/subnet-evm/plugin/evm/validators=StateCallbackListener=plugin/evm/validators/mocks.go + diff --git a/utils/snow.go b/utils/snow.go index 2042c9ff2d..48df0388aa 100644 --- a/utils/snow.go +++ b/utils/snow.go @@ -4,9 +4,12 @@ package utils import ( + "context" + "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/snow/validators/validatorstest" 
"github.com/ava-labs/avalanchego/upgrade" "github.com/ava-labs/avalanchego/utils/crypto/bls" @@ -30,6 +33,17 @@ func TestSnowContext() *snow.Context { BCLookup: ids.NewAliaser(), Metrics: metrics.NewPrefixGatherer(), ChainDataDir: "", - ValidatorState: &validatorstest.State{}, + ValidatorState: NewTestValidatorState(), + } +} + +func NewTestValidatorState() *validatorstest.State { + return &validatorstest.State{ + GetCurrentHeightF: func(context.Context) (uint64, error) { + return 0, nil + }, + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return make(map[ids.NodeID]*validators.GetValidatorOutput), nil + }, } } From 92f6b7e9d8ee42500d6d99191e5c4959f95838cf Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 19 Sep 2024 12:23:38 -0400 Subject: [PATCH 25/98] Update plugin/evm/validators/state.go Co-authored-by: Darioush Jalali Signed-off-by: Ceyhun Onur --- plugin/evm/validators/state.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/validators/state.go b/plugin/evm/validators/state.go index a3a410f35c..e7f95a7703 100644 --- a/plugin/evm/validators/state.go +++ b/plugin/evm/validators/state.go @@ -63,7 +63,7 @@ type validatorData struct { type state struct { data map[ids.ID]*validatorData // vID -> validatorData index map[ids.NodeID]ids.ID // nodeID -> vID - // updatedData tracks the updates since las WriteValidator was called + // updatedData tracks the updates since WriteValidator was last called updatedData map[ids.ID]bool // vID -> true(updated)/false(deleted) db database.Database From 0db20413d8cb3b1c47e5fbb1c315ac4e13be5a8f Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 19 Sep 2024 12:32:12 -0400 Subject: [PATCH 26/98] use update enum --- plugin/evm/validators/state.go | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/plugin/evm/validators/state.go b/plugin/evm/validators/state.go index a3a410f35c..6bcd2f525c 100644 --- a/plugin/evm/validators/state.go +++ b/plugin/evm/validators/state.go @@ -15,6 +15,13 @@ import ( var _ uptime.State = &state{} +type dbUpdateStatus bool + +const ( + updated dbUpdateStatus = true + deleted dbUpdateStatus = false +) + type State interface { uptime.State // AddNewValidator adds a new validator to the state @@ -64,7 +71,7 @@ type state struct { data map[ids.ID]*validatorData // vID -> validatorData index map[ids.NodeID]ids.ID // nodeID -> vID // updatedData tracks the updates since las WriteValidator was called - updatedData map[ids.ID]bool // vID -> true(updated)/false(deleted) + updatedData map[ids.ID]dbUpdateStatus // vID -> updated/deleted db database.Database listeners []StateCallbackListener @@ -75,7 +82,7 @@ func NewState(db database.Database) (State, error) { s := &state{ index: make(map[ids.NodeID]ids.ID), data: make(map[ids.ID]*validatorData), - updatedData: make(map[ids.ID]bool), + updatedData: make(map[ids.ID]dbUpdateStatus), db: db, } if err := s.loadFromDisk(); err != nil { @@ -108,7 +115,7 @@ func (s *state) SetUptime( data.UpDuration = upDuration data.lastUpdated = lastUpdated - s.updatedData[data.validationID] = true + s.updatedData[data.validationID] = updated return nil } @@ -140,7 +147,7 @@ func (s *state) AddNewValidator(vID ids.ID, nodeID ids.NodeID, startTimestamp ui return err } - s.updatedData[vID] = true + s.updatedData[vID] = updated for _, listener := range s.listeners { listener.OnValidatorAdded(vID, nodeID, startTimestamp, isActive) @@ 
-159,7 +166,7 @@ func (s *state) DeleteValidator(vID ids.ID) error { delete(s.index, data.NodeID) // mark as deleted for WriteValidator - s.updatedData[data.validationID] = false + s.updatedData[data.validationID] = deleted for _, listener := range s.listeners { listener.OnValidatorRemoved(vID, data.NodeID) @@ -171,8 +178,9 @@ func (s *state) DeleteValidator(vID ids.ID) error { func (s *state) WriteState() error { // TODO: consider adding batch size batch := s.db.NewBatch() - for vID, updated := range s.updatedData { - if updated { + for vID, updateStatus := range s.updatedData { + switch updateStatus { + case updated: data := s.data[vID] data.LastUpdated = uint64(data.lastUpdated.Unix()) // should never change but in case @@ -185,10 +193,12 @@ func (s *state) WriteState() error { if err := batch.Put(vID[:], dataBytes); err != nil { return err } - } else { // deleted + case deleted: if err := batch.Delete(vID[:]); err != nil { return err } + default: + return fmt.Errorf("unknown update status for %s", vID) } // we're done, remove the updated marker delete(s.updatedData, vID) @@ -203,7 +213,7 @@ func (s *state) SetStatus(vID ids.ID, isActive bool) error { return database.ErrNotFound } data.IsActive = isActive - s.updatedData[vID] = true + s.updatedData[vID] = updated for _, listener := range s.listeners { listener.OnValidatorStatusUpdated(vID, data.NodeID, isActive) From c5520bcbd8048f306d210377b2f4bec7deb5fcef Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 19 Sep 2024 13:20:11 -0400 Subject: [PATCH 27/98] Update plugin/evm/validators/state.go Co-authored-by: Darioush Jalali Signed-off-by: Ceyhun Onur --- plugin/evm/validators/state.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/validators/state.go b/plugin/evm/validators/state.go index e7f95a7703..a486577f9d 100644 --- a/plugin/evm/validators/state.go +++ b/plugin/evm/validators/state.go @@ -239,7 +239,7 @@ func (s *state) GetValidatorIDs() set.Set[ids.NodeID] { } // RegisterListener registers a listener to the state -// the listener will be notified of current validators via OnValidatorAdded +// OnValidatorAdded is called for all current validators on the provided listener before this function returns func (s *state) RegisterListener(listener StateCallbackListener) { s.listeners = append(s.listeners, listener) From dea94af1ce1c8e03b55ab4650e38489983eb443d Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 19 Sep 2024 13:20:21 -0400 Subject: [PATCH 28/98] Update plugin/evm/validators/state.go Co-authored-by: Darioush Jalali Signed-off-by: Ceyhun Onur --- plugin/evm/validators/state.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/validators/state.go b/plugin/evm/validators/state.go index a486577f9d..7dfb5b9620 100644 --- a/plugin/evm/validators/state.go +++ b/plugin/evm/validators/state.go @@ -299,7 +299,7 @@ func (s *state) putData(vID ids.ID, data *validatorData) error { } // getData returns the data for the validator with the given nodeID -// returns ErrNotFound if the data does not exist +// returns database.ErrNotFound if the data does not exist func (s *state) getData(nodeID ids.NodeID) (*validatorData, error) { vID, exists := s.index[nodeID] if !exists { From c0f6ff40e20dbcb0875318b67cc69cbe51b40f18 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 19 Sep 2024 13:45:35 -0400 Subject: [PATCH 29/98] respond to comments --- plugin/evm/validators/state.go | 54 ++++++++++++++++------------- plugin/evm/validators/state_test.go | 52 +++++++++++++-------------- 2 
files changed, 54 insertions(+), 52 deletions(-) diff --git a/plugin/evm/validators/state.go b/plugin/evm/validators/state.go index 6bcd2f525c..bc42f82a21 100644 --- a/plugin/evm/validators/state.go +++ b/plugin/evm/validators/state.go @@ -17,6 +17,8 @@ var _ uptime.State = &state{} type dbUpdateStatus bool +var ErrAlreadyExists = fmt.Errorf("validator already exists") + const ( updated dbUpdateStatus = true deleted dbUpdateStatus = false @@ -24,8 +26,8 @@ const ( type State interface { uptime.State - // AddNewValidator adds a new validator to the state - AddNewValidator(vID ids.ID, nodeID ids.NodeID, startTimestamp uint64, isActive bool) error + // AddValidator adds a new validator to the state + AddValidator(vID ids.ID, nodeID ids.NodeID, startTimestamp uint64, isActive bool) error // DeleteValidator deletes the validator from the state DeleteValidator(vID ids.ID) error // WriteState writes the validator state to the disk @@ -63,8 +65,6 @@ type validatorData struct { IsActive bool `serialize:"true"` validationID ids.ID // database key - lastUpdated time.Time - startTime time.Time } type state struct { @@ -99,7 +99,7 @@ func (s *state) GetUptime( if err != nil { return 0, time.Time{}, err } - return data.UpDuration, data.lastUpdated, nil + return data.UpDuration, data.getLastUpdated(), nil } // SetUptime sets the uptime of the validator with the given nodeID @@ -113,7 +113,7 @@ func (s *state) SetUptime( return err } data.UpDuration = upDuration - data.lastUpdated = lastUpdated + data.setLastUpdated(lastUpdated) s.updatedData[data.validationID] = updated return nil @@ -125,14 +125,12 @@ func (s *state) GetStartTime(nodeID ids.NodeID) (time.Time, error) { if err != nil { return time.Time{}, err } - return data.startTime, nil + return data.getStartTime(), nil } -// AddNewValidator adds a new validator to the state +// AddValidator adds a new validator to the state // the new validator is marked as updated and will be written to the disk when WriteState is called -func (s *state) AddNewValidator(vID ids.ID, nodeID ids.NodeID, startTimestamp uint64, isActive bool) error { - startTimeUnix := time.Unix(int64(startTimestamp), 0) - +func (s *state) AddValidator(vID ids.ID, nodeID ids.NodeID, startTimestamp uint64, isActive bool) error { data := &validatorData{ NodeID: nodeID, validationID: vID, @@ -140,10 +138,8 @@ func (s *state) AddNewValidator(vID ids.ID, nodeID ids.NodeID, startTimestamp ui StartTime: startTimestamp, UpDuration: 0, LastUpdated: startTimestamp, - lastUpdated: startTimeUnix, - startTime: startTimeUnix, } - if err := s.putData(vID, data); err != nil { + if err := s.addData(vID, data); err != nil { return err } @@ -182,9 +178,6 @@ func (s *state) WriteState() error { switch updateStatus { case updated: data := s.data[vID] - data.LastUpdated = uint64(data.lastUpdated.Unix()) - // should never change but in case - data.StartTime = uint64(data.startTime.Unix()) dataBytes, err := vdrCodec.Marshal(codecVersion, data) if err != nil { @@ -255,7 +248,7 @@ func (s *state) RegisterListener(listener StateCallbackListener) { // notify the listener of the current state for vID, data := range s.data { - listener.OnValidatorAdded(vID, data.NodeID, uint64(data.startTime.Unix()), data.IsActive) + listener.OnValidatorAdded(vID, data.NodeID, data.StartTime, data.IsActive) } } @@ -266,8 +259,6 @@ func parseValidatorData(bytes []byte, data *validatorData) error { return err } } - data.lastUpdated = time.Unix(int64(data.LastUpdated), 0) - data.startTime = time.Unix(int64(data.StartTime), 0) return nil 
} @@ -287,20 +278,21 @@ func (s *state) loadFromDisk() error { if err := parseValidatorData(it.Value(), vdr); err != nil { return fmt.Errorf("failed to parse validator data: %w", err) } - if err := s.putData(vID, vdr); err != nil { + if err := s.addData(vID, vdr); err != nil { return err } } return it.Error() } -func (s *state) putData(vID ids.ID, data *validatorData) error { +// addData adds the data to the state +// returns an error if the data already exists +func (s *state) addData(vID ids.ID, data *validatorData) error { if _, exists := s.data[vID]; exists { - return fmt.Errorf("validator data already exists for %s", vID) + return fmt.Errorf("%w, vID: %s", ErrAlreadyExists, vID) } - // should never happen if _, exists := s.index[data.NodeID]; exists { - return fmt.Errorf("validator data already exists for %s", data.NodeID) + return fmt.Errorf("%w, nodeID: %s", ErrAlreadyExists, data.NodeID) } s.data[vID] = data @@ -321,3 +313,15 @@ func (s *state) getData(nodeID ids.NodeID) (*validatorData, error) { } return data, nil } + +func (v *validatorData) setLastUpdated(t time.Time) { + v.LastUpdated = uint64(t.Unix()) +} + +func (v *validatorData) getLastUpdated() time.Time { + return time.Unix(int64(v.LastUpdated), 0) +} + +func (v *validatorData) getStartTime() time.Time { + return time.Unix(int64(v.StartTime), 0) +} diff --git a/plugin/evm/validators/state_test.go b/plugin/evm/validators/state_test.go index d4398553b3..7ba50bde89 100644 --- a/plugin/evm/validators/state_test.go +++ b/plugin/evm/validators/state_test.go @@ -34,29 +34,29 @@ func TestState(t *testing.T) { require.ErrorIs(err, database.ErrNotFound) // add new validator - state.AddNewValidator(vID, nodeID, uint64(startTime.Unix()), true) + state.AddValidator(vID, nodeID, uint64(startTime.Unix()), true) // adding the same validator should fail - err = state.AddNewValidator(vID, nodeID, uint64(startTime.Unix()), true) - require.Error(err) + err = state.AddValidator(vID, ids.GenerateTestNodeID(), uint64(startTime.Unix()), true) + require.ErrorIs(err, ErrAlreadyExists) // adding the same nodeID should fail - err = state.AddNewValidator(ids.GenerateTestID(), nodeID, uint64(startTime.Unix()), true) - require.Error(err) + err = state.AddValidator(ids.GenerateTestID(), nodeID, uint64(startTime.Unix()), true) + require.ErrorIs(err, ErrAlreadyExists) // get uptime - upDuration, lastUpdated, err := state.GetUptime(nodeID) + uptime, lastUpdated, err := state.GetUptime(nodeID) require.NoError(err) - require.Equal(time.Duration(0), upDuration) + require.Equal(time.Duration(0), uptime) require.Equal(startTime.Unix(), lastUpdated.Unix()) // set uptime - newUpDuration := 2 * time.Minute + newuptime := 2 * time.Minute newLastUpdated := lastUpdated.Add(time.Hour) - require.NoError(state.SetUptime(nodeID, newUpDuration, newLastUpdated)) + require.NoError(state.SetUptime(nodeID, newuptime, newLastUpdated)) // get new uptime - upDuration, lastUpdated, err = state.GetUptime(nodeID) + uptime, lastUpdated, err = state.GetUptime(nodeID) require.NoError(err) - require.Equal(newUpDuration, upDuration) + require.Equal(newuptime, uptime) require.Equal(newLastUpdated, lastUpdated) // set status @@ -67,7 +67,7 @@ func TestState(t *testing.T) { require.False(status) // delete uptime - state.DeleteValidator(vID) + require.NoError(state.DeleteValidator(vID)) // get deleted uptime _, _, err = state.GetUptime(nodeID) @@ -86,16 +86,16 @@ func TestWriteValidator(t *testing.T) { nodeID := ids.GenerateTestNodeID() vID := ids.GenerateTestID() startTime := time.Now() - 
state.AddNewValidator(vID, nodeID, uint64(startTime.Unix()), true) + require.NoError(state.AddValidator(vID, nodeID, uint64(startTime.Unix()), true)) // write state, should reflect to DB require.NoError(state.WriteState()) require.True(db.Has(vID[:])) // set uptime - newUpDuration := 2 * time.Minute + newuptime := 2 * time.Minute newLastUpdated := startTime.Add(time.Hour) - require.NoError(state.SetUptime(nodeID, newUpDuration, newLastUpdated)) + require.NoError(state.SetUptime(nodeID, newuptime, newLastUpdated)) require.NoError(state.WriteState()) // refresh state, should load from DB @@ -103,13 +103,13 @@ func TestWriteValidator(t *testing.T) { require.NoError(err) // get uptime - upDuration, lastUpdated, err := state.GetUptime(nodeID) + uptime, lastUpdated, err := state.GetUptime(nodeID) require.NoError(err) - require.Equal(newUpDuration, upDuration) + require.Equal(newuptime, uptime) require.Equal(newLastUpdated.Unix(), lastUpdated.Unix()) // delete - state.DeleteValidator(vID) + require.NoError(state.DeleteValidator(vID)) // write state, should reflect to DB require.NoError(state.WriteState()) @@ -130,8 +130,8 @@ func TestParseValidator(t *testing.T) { name: "nil", bytes: nil, expected: &validatorData{ - lastUpdated: time.Unix(0, 0), - startTime: time.Unix(0, 0), + LastUpdated: 0, + StartTime: 0, }, expectedErr: nil, }, @@ -139,8 +139,8 @@ func TestParseValidator(t *testing.T) { name: "empty", bytes: []byte{}, expected: &validatorData{ - lastUpdated: time.Unix(0, 0), - startTime: time.Unix(0, 0), + LastUpdated: 0, + StartTime: 0, }, expectedErr: nil, }, @@ -165,15 +165,13 @@ func TestParseValidator(t *testing.T) { expected: &validatorData{ UpDuration: time.Duration(6000000), LastUpdated: 900000, - lastUpdated: time.Unix(900000, 0), NodeID: testNodeID, StartTime: 6000000, - startTime: time.Unix(6000000, 0), IsActive: true, }, }, { - name: "invalid codec version", + name: " ", bytes: []byte{ // codec version 0x00, 0x02, @@ -245,13 +243,13 @@ func TestStateListener(t *testing.T) { state.RegisterListener(listener) // add new validator - state.AddNewValidator(expectedvID, expectedNodeID, uint64(expectedStartTime.Unix()), true) + require.NoError(state.AddValidator(expectedvID, expectedNodeID, uint64(expectedStartTime.Unix()), true)) // set status require.NoError(state.SetStatus(expectedvID, false)) // remove validator - state.DeleteValidator(expectedvID) + require.NoError(state.DeleteValidator(expectedvID)) } var _ StateCallbackListener = (*testCallbackListener)(nil) From b566103400b0846b2594283652f9038e062f5e76 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 19 Sep 2024 13:51:21 -0400 Subject: [PATCH 30/98] update avalanchego dep branch --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index cc35468149..f36e984d69 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21.12 require ( github.com/VictoriaMetrics/fastcache v1.12.1 github.com/antithesishq/antithesis-sdk-go v0.3.8 - github.com/ava-labs/avalanchego v1.11.11 + github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240916220401-1753950304a4 github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 32359d6de1..3ecbfbb723 100644 --- a/go.sum +++ b/go.sum @@ -60,6 +60,8 @@ github.com/antithesishq/antithesis-sdk-go v0.3.8/go.mod 
h1:IUpT2DPAKh6i/YhSbt6Gl github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/ava-labs/avalanchego v1.11.11 h1:MIQq8xRavRj4ZXHA4G+aMiymig7SOScGOG1SApmMvBc= github.com/ava-labs/avalanchego v1.11.11/go.mod h1:yFx3V31Jy9NFa8GZlgGnwiVf8KGjeF2+Uc99l9Scd/8= +github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240916220401-1753950304a4 h1:07qWIUU3C/nAVBJK5orGZKoEVodQE8OsfnpPZ8cTnSQ= +github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240916220401-1753950304a4/go.mod h1:yFx3V31Jy9NFa8GZlgGnwiVf8KGjeF2+Uc99l9Scd/8= github.com/ava-labs/coreth v0.13.8-fixed-genesis-upgrade.0.20240815193440-a96bc921e732 h1:wlhGJbmb7s3bU2QWtxKjscGjfHknQiq+cVhhUjONsB8= github.com/ava-labs/coreth v0.13.8-fixed-genesis-upgrade.0.20240815193440-a96bc921e732/go.mod h1:RkQLaQ961Xe/sUb3ycn4Qi18vPPuEetTqDf2eDcquAs= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= From ad3a35a20d723acf7712f2a7784e0e596938d3c6 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 19 Sep 2024 14:20:01 -0400 Subject: [PATCH 31/98] reviews --- plugin/evm/uptime/pausable_manager.go | 33 ++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/plugin/evm/uptime/pausable_manager.go b/plugin/evm/uptime/pausable_manager.go index 841c84b45d..2c91151d4a 100644 --- a/plugin/evm/uptime/pausable_manager.go +++ b/plugin/evm/uptime/pausable_manager.go @@ -7,6 +7,7 @@ import ( "errors" "github.com/ava-labs/subnet-evm/plugin/evm/validators" + "github.com/ethereum/go-ethereum/log" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/uptime" @@ -15,7 +16,11 @@ import ( var _ validators.StateCallbackListener = &pausableManager{} -var ErrPausedDc = errors.New("paused node cannot be disconnected") +var ( + errPausedDisconnect = errors.New("paused node cannot be disconnected") + errAlreadyPaused = errors.New("node is already paused") + errNotPaused = errors.New("node is not paused") +) type PausableManager interface { uptime.Manager @@ -59,7 +64,7 @@ func (p *pausableManager) Disconnect(nodeID ids.NodeID) error { if p.Manager.IsConnected(nodeID) { if p.IsPaused(nodeID) { // We should never see this case - return ErrPausedDc + return errPausedDisconnect } return p.Manager.Disconnect(nodeID) } @@ -82,7 +87,10 @@ func (p *pausableManager) StartTracking(nodeIDs []ids.NodeID) error { // If the node is inactive, it will be paused. func (p *pausableManager) OnValidatorAdded(vID ids.ID, nodeID ids.NodeID, startTime uint64, isActive bool) { if !isActive { - p.pause(nodeID) + err := p.pause(nodeID) + if err != nil { + log.Error("failed to pause node %s: %s", nodeID, err) + } } } @@ -90,17 +98,24 @@ func (p *pausableManager) OnValidatorAdded(vID ids.ID, nodeID ids.NodeID, startT // If the node is already paused, it will be resumed. func (p *pausableManager) OnValidatorRemoved(vID ids.ID, nodeID ids.NodeID) { if p.IsPaused(nodeID) { - p.resume(nodeID) + err := p.resume(nodeID) + if err != nil { + log.Error("failed to resume node %s: %s", nodeID, err) + } } } // OnValidatorStatusUpdated is called when the status of a validator is updated. // If the node is active, it will be resumed. If the node is inactive, it will be paused. 
func (p *pausableManager) OnValidatorStatusUpdated(vID ids.ID, nodeID ids.NodeID, isActive bool) { + var err error if isActive { - p.resume(nodeID) + err = p.resume(nodeID) } else { - p.pause(nodeID) + err = p.pause(nodeID) + } + if err != nil { + log.Error("failed to update status for node %s: %s", nodeID, err) } } @@ -113,6 +128,9 @@ func (p *pausableManager) IsPaused(nodeID ids.NodeID) bool { // pause can disconnect the node from the uptime.Manager if it is connected. // Returns an error if the node is already paused. func (p *pausableManager) pause(nodeID ids.NodeID) error { + if p.IsPaused(nodeID) { + return errAlreadyPaused + } p.pausedVdrs.Add(nodeID) if p.Manager.IsConnected(nodeID) { // If the node is connected, then we need to disconnect it from @@ -128,6 +146,9 @@ func (p *pausableManager) pause(nodeID ids.NodeID) error { // resume can connect the node to the uptime.Manager if it was connected. // Returns an error if the node is not paused. func (p *pausableManager) resume(nodeID ids.NodeID) error { + if !p.IsPaused(nodeID) { + return errNotPaused + } p.pausedVdrs.Remove(nodeID) if p.connectedVdrs.Contains(nodeID) { return p.Manager.Connect(nodeID) From 64fe238e1b46666ad476c3e4dd1698b37a43c602 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 19 Sep 2024 14:21:09 -0400 Subject: [PATCH 32/98] reword errs --- plugin/evm/uptime/pausable_manager.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/evm/uptime/pausable_manager.go b/plugin/evm/uptime/pausable_manager.go index 2c91151d4a..1505616aeb 100644 --- a/plugin/evm/uptime/pausable_manager.go +++ b/plugin/evm/uptime/pausable_manager.go @@ -89,7 +89,7 @@ func (p *pausableManager) OnValidatorAdded(vID ids.ID, nodeID ids.NodeID, startT if !isActive { err := p.pause(nodeID) if err != nil { - log.Error("failed to pause node %s: %s", nodeID, err) + log.Error("failed to handle added validator %s: %s", nodeID, err) } } } @@ -100,7 +100,7 @@ func (p *pausableManager) OnValidatorRemoved(vID ids.ID, nodeID ids.NodeID) { if p.IsPaused(nodeID) { err := p.resume(nodeID) if err != nil { - log.Error("failed to resume node %s: %s", nodeID, err) + log.Error("failed to handle validator removed %s: %s", nodeID, err) } } } From d7338da9db31d522caaeb346ba4dc6cd7bf722a9 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 19 Sep 2024 14:27:16 -0400 Subject: [PATCH 33/98] fix test changes --- plugin/evm/validators/state_test.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/plugin/evm/validators/state_test.go b/plugin/evm/validators/state_test.go index 7ba50bde89..2babe1d6ab 100644 --- a/plugin/evm/validators/state_test.go +++ b/plugin/evm/validators/state_test.go @@ -50,13 +50,13 @@ func TestState(t *testing.T) { require.Equal(startTime.Unix(), lastUpdated.Unix()) // set uptime - newuptime := 2 * time.Minute + newUptime := 2 * time.Minute newLastUpdated := lastUpdated.Add(time.Hour) - require.NoError(state.SetUptime(nodeID, newuptime, newLastUpdated)) + require.NoError(state.SetUptime(nodeID, newUptime, newLastUpdated)) // get new uptime uptime, lastUpdated, err = state.GetUptime(nodeID) require.NoError(err) - require.Equal(newuptime, uptime) + require.Equal(newUptime, uptime) require.Equal(newLastUpdated, lastUpdated) // set status @@ -93,9 +93,9 @@ func TestWriteValidator(t *testing.T) { require.True(db.Has(vID[:])) // set uptime - newuptime := 2 * time.Minute + newUptime := 2 * time.Minute newLastUpdated := startTime.Add(time.Hour) - require.NoError(state.SetUptime(nodeID, newuptime, 
newLastUpdated)) + require.NoError(state.SetUptime(nodeID, newUptime, newLastUpdated)) require.NoError(state.WriteState()) // refresh state, should load from DB @@ -105,7 +105,7 @@ func TestWriteValidator(t *testing.T) { // get uptime uptime, lastUpdated, err := state.GetUptime(nodeID) require.NoError(err) - require.Equal(newuptime, uptime) + require.Equal(newUptime, uptime) require.Equal(newLastUpdated.Unix(), lastUpdated.Unix()) // delete @@ -171,7 +171,7 @@ func TestParseValidator(t *testing.T) { }, }, { - name: " ", + name: "invalid codec version", bytes: []byte{ // codec version 0x00, 0x02, From 9ad552800c6702fa723d33eee252f1fc78d3c66c Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 19 Sep 2024 16:29:05 -0400 Subject: [PATCH 34/98] fix upgrades after deactivating latest in context --- params/config.go | 40 ++++++++++++--------------------- params/network_upgrades_test.go | 5 +++-- plugin/evm/vm_test.go | 1 - utils/snow.go | 4 ++-- 4 files changed, 19 insertions(+), 31 deletions(-) diff --git a/params/config.go b/params/config.go index 41db362e7e..5f75dc2815 100644 --- a/params/config.go +++ b/params/config.go @@ -48,24 +48,6 @@ var ( FeeConfig: DefaultFeeConfig, AllowFeeRecipients: false, - HomesteadBlock: big.NewInt(0), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - MuirGlacierBlock: big.NewInt(0), - NetworkUpgrades: getDefaultNetworkUpgrades(upgrade.GetConfig(constants.MainnetID)), // This can be changed to correct network (local, test) via VM. - GenesisPrecompiles: Precompiles{}, - } - - TestChainConfig = &ChainConfig{ - AvalancheContext: AvalancheContext{utils.TestSnowContext()}, - ChainID: big.NewInt(1), - FeeConfig: DefaultFeeConfig, - AllowFeeRecipients: false, HomesteadBlock: big.NewInt(0), EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(0), @@ -77,11 +59,8 @@ var ( MuirGlacierBlock: big.NewInt(0), BerlinBlock: big.NewInt(0), LondonBlock: big.NewInt(0), - ShanghaiTime: utils.TimeToNewUint64(upgrade.GetConfig(constants.UnitTestID).DurangoTime), - CancunTime: utils.TimeToNewUint64(upgrade.GetConfig(constants.UnitTestID).EtnaTime), - NetworkUpgrades: getDefaultNetworkUpgrades(upgrade.GetConfig(constants.UnitTestID)), // This can be changed to correct network (local, test) via VM. + NetworkUpgrades: getDefaultNetworkUpgrades(upgrade.GetConfig(constants.MainnetID)), // This can be changed to correct network (local, test) via VM. 
GenesisPrecompiles: Precompiles{}, - UpgradeConfig: UpgradeConfig{}, } TestPreSubnetEVMChainConfig = &ChainConfig{ @@ -146,9 +125,12 @@ var ( PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ShanghaiTime: utils.TimeToNewUint64(upgrade.InitiallyActiveTime), NetworkUpgrades: NetworkUpgrades{ SubnetEVMTimestamp: utils.NewUint64(0), - DurangoTimestamp: utils.NewUint64(0), + DurangoTimestamp: utils.TimeToNewUint64(upgrade.InitiallyActiveTime), EtnaTimestamp: utils.TimeToNewUint64(upgrade.UnscheduledActivationTime), }, GenesisPrecompiles: Precompiles{}, @@ -169,15 +151,21 @@ var ( PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + ShanghaiTime: utils.TimeToNewUint64(upgrade.InitiallyActiveTime), + CancunTime: utils.TimeToNewUint64(upgrade.InitiallyActiveTime), NetworkUpgrades: NetworkUpgrades{ SubnetEVMTimestamp: utils.NewUint64(0), - DurangoTimestamp: utils.NewUint64(0), - EtnaTimestamp: utils.NewUint64(0), + DurangoTimestamp: utils.TimeToNewUint64(upgrade.InitiallyActiveTime), + EtnaTimestamp: utils.TimeToNewUint64(upgrade.InitiallyActiveTime), }, GenesisPrecompiles: Precompiles{}, UpgradeConfig: UpgradeConfig{}, } - TestRules = TestChainConfig.Rules(new(big.Int), 0) + + TestChainConfig = TestEtnaChainConfig + TestRules = TestChainConfig.Rules(new(big.Int), 0) ) // ChainConfig is the core config which determines the blockchain settings. diff --git a/params/network_upgrades_test.go b/params/network_upgrades_test.go index 1a93a2587e..d476ff66be 100644 --- a/params/network_upgrades_test.go +++ b/params/network_upgrades_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/ava-labs/avalanchego/upgrade" + "github.com/ava-labs/avalanchego/upgrade/upgradetest" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/subnet-evm/utils" "github.com/stretchr/testify/require" @@ -179,13 +180,13 @@ func TestVerifyNetworkUpgrades(t *testing.T) { expected bool }{ { - name: "ValidNetworkUpgrades for custom network", + name: "ValidNetworkUpgrades for latest network", upgrades: &NetworkUpgrades{ SubnetEVMTimestamp: utils.NewUint64(0), DurangoTimestamp: utils.NewUint64(1607144400), EtnaTimestamp: utils.NewUint64(1607144400), }, - avagoUpgrades: upgrade.GetConfig(1111), + avagoUpgrades: upgradetest.GetConfig(upgradetest.Latest), expected: true, }, { diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 934a999684..13a22a8e81 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -145,7 +145,6 @@ func NewContext() *snow.Context { ctx.ChainID = testCChainID ctx.AVAXAssetID = testAvaxAssetID ctx.XChainID = testXChainID - ctx.NetworkUpgrades = upgrade.GetConfig(testNetworkID) aliaser := ctx.BCLookup.(ids.Aliaser) _ = aliaser.Alias(testCChainID, "C") _ = aliaser.Alias(testCChainID, testCChainID.String()) diff --git a/utils/snow.go b/utils/snow.go index 2042c9ff2d..f7e194ef7e 100644 --- a/utils/snow.go +++ b/utils/snow.go @@ -8,7 +8,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators/validatorstest" - "github.com/ava-labs/avalanchego/upgrade" + "github.com/ava-labs/avalanchego/upgrade/upgradetest" 
"github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -24,7 +24,7 @@ func TestSnowContext() *snow.Context { SubnetID: ids.Empty, ChainID: ids.Empty, NodeID: ids.EmptyNodeID, - NetworkUpgrades: upgrade.Default, + NetworkUpgrades: upgradetest.GetConfig(upgradetest.Latest), PublicKey: pk, Log: logging.NoLog{}, BCLookup: ids.NewAliaser(), From 3225a3221ddeacca9b1c2b3e5316ea93e76916e0 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 20 Sep 2024 10:47:34 -0400 Subject: [PATCH 35/98] use test branch from avalanchego --- go.mod | 4 +- go.sum | 10 ++- plugin/evm/api.go | 38 +++++++++ plugin/evm/mock_validator_state.go | 79 ------------------- plugin/evm/service.go | 103 ++++++++----------------- plugin/evm/validators/state.go | 2 +- plugin/evm/validators/state_test.go | 2 +- plugin/evm/vm.go | 21 ++--- plugin/evm/vm_validators_state_test.go | 98 ++++++++++++----------- 9 files changed, 143 insertions(+), 214 deletions(-) create mode 100644 plugin/evm/api.go delete mode 100644 plugin/evm/mock_validator_state.go diff --git a/go.mod b/go.mod index f36e984d69..8996f62174 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21.12 require ( github.com/VictoriaMetrics/fastcache v1.12.1 github.com/antithesishq/antithesis-sdk-go v0.3.8 - github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240916220401-1753950304a4 + github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240920144520-3adf6d4c0f62 github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 @@ -55,7 +55,7 @@ require ( require ( github.com/DataDog/zstd v1.5.2 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect - github.com/ava-labs/coreth v0.13.8-fixed-genesis-upgrade.0.20240815193440-a96bc921e732 // indirect + github.com/ava-labs/coreth v0.13.8 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.10.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect diff --git a/go.sum b/go.sum index 13435695b9..ad8bc233a2 100644 --- a/go.sum +++ b/go.sum @@ -58,10 +58,12 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax github.com/antithesishq/antithesis-sdk-go v0.3.8 h1:OvGoHxIcOXFJLyn9IJQ5DzByZ3YVAWNBc394ObzDRb8= github.com/antithesishq/antithesis-sdk-go v0.3.8/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240916220401-1753950304a4 h1:07qWIUU3C/nAVBJK5orGZKoEVodQE8OsfnpPZ8cTnSQ= -github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240916220401-1753950304a4/go.mod h1:yFx3V31Jy9NFa8GZlgGnwiVf8KGjeF2+Uc99l9Scd/8= -github.com/ava-labs/coreth v0.13.8-fixed-genesis-upgrade.0.20240815193440-a96bc921e732 h1:wlhGJbmb7s3bU2QWtxKjscGjfHknQiq+cVhhUjONsB8= -github.com/ava-labs/coreth v0.13.8-fixed-genesis-upgrade.0.20240815193440-a96bc921e732/go.mod h1:RkQLaQ961Xe/sUb3ycn4Qi18vPPuEetTqDf2eDcquAs= +github.com/ava-labs/avalanchego 
v1.11.12-rc.2.0.20240919204453-a754e44c1795 h1:zxqtKkAuU70XQlr3Pz6RQDvopXBqGDu2y2PshXe507U= +github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240919204453-a754e44c1795/go.mod h1:YzHJbHAJOlRLwG1pxWk4uAI7nvV4cxpgQL1FSAx/H4Y= +github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240920144520-3adf6d4c0f62 h1:H/8gjHFcYDC02oSDehRQW3s89cIV6aM2u7WfxMuxlNU= +github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240920144520-3adf6d4c0f62/go.mod h1:YzHJbHAJOlRLwG1pxWk4uAI7nvV4cxpgQL1FSAx/H4Y= +github.com/ava-labs/coreth v0.13.8 h1:f14X3KgwHl9LwzfxlN6S4bbn5VA2rhEsNnHaRLSTo/8= +github.com/ava-labs/coreth v0.13.8/go.mod h1:t3BSv/eQv0AlDPMfEDCMMoD/jq1RkUsbFzQAFg5qBcE= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= diff --git a/plugin/evm/api.go b/plugin/evm/api.go new file mode 100644 index 0000000000..a8fe61cbc0 --- /dev/null +++ b/plugin/evm/api.go @@ -0,0 +1,38 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package evm + +import ( + "context" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +// SnowmanAPI introduces snowman specific functionality to the evm +type SnowmanAPI struct{ vm *VM } + +// GetAcceptedFrontReply defines the reply that will be sent from the +// GetAcceptedFront API call +type GetAcceptedFrontReply struct { + Hash common.Hash `json:"hash"` + Number *big.Int `json:"number"` +} + +// GetAcceptedFront returns the last accepted block's hash and height +func (api *SnowmanAPI) GetAcceptedFront(ctx context.Context) (*GetAcceptedFrontReply, error) { + blk := api.vm.blockChain.LastConsensusAcceptedBlock() + return &GetAcceptedFrontReply{ + Hash: blk.Hash(), + Number: blk.Number(), + }, nil +} + +// IssueBlock to the chain +func (api *SnowmanAPI) IssueBlock(ctx context.Context) error { + log.Info("Issuing a new block") + api.vm.builder.signalTxsReady() + return nil +} diff --git a/plugin/evm/mock_validator_state.go b/plugin/evm/mock_validator_state.go deleted file mode 100644 index bcab6dcba5..0000000000 --- a/plugin/evm/mock_validator_state.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package evm - -import ( - "context" - "time" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/crypto/bls" -) - -var ( - DefaultStartTime = uint64(time.Date(2024, time.July, 30, 0, 0, 0, 0, time.UTC).Unix()) - DefaultSetWeightNonce = uint64(0) - DefaultIsActive = true -) - -type MockValidatorOutput struct { - NodeID ids.NodeID - VID ids.ID - IsActive bool - StartTime uint64 - SetWeightNonce uint64 - Weight uint64 - BLSPublicKey *bls.PublicKey -} - -type MockedValidatorState interface { - validators.State - // GetCurrentValidatorSet returns the current validator set for the provided subnet - // Returned map contains the ValidationID as the key and the ValidatorOutput as the value - GetCurrentValidatorSet(ctx context.Context, subnetID ids.ID) (map[ids.ID]*MockValidatorOutput, error) -} - -type MockValidatorState struct { - validators.State -} - -func NewMockValidatorState(pState validators.State) MockedValidatorState { - return &MockValidatorState{ - State: pState, - } -} - -func (t *MockValidatorState) GetCurrentValidatorSet(ctx context.Context, subnetID ids.ID) (map[ids.ID]*MockValidatorOutput, error) { - currentPHeight, err := t.GetCurrentHeight(ctx) - if err != nil { - return nil, err - } - validatorSet, err := t.GetValidatorSet(ctx, currentPHeight, subnetID) - if err != nil { - return nil, err - } - output := make(map[ids.ID]*MockValidatorOutput, len(validatorSet)) - for key, value := range validatorSet { - // Converts the 20 bytes nodeID to a 32-bytes validationID - // TODO: This is a temporary solution until we can use the correct ID type - // fill bytes with 0s to make it 32 bytes - keyBytes := make([]byte, 32) - copy(keyBytes[:], key.Bytes()) - validationID, err := ids.ToID(keyBytes) - if err != nil { - return nil, err - } - output[validationID] = &MockValidatorOutput{ - VID: validationID, - NodeID: value.NodeID, - IsActive: DefaultIsActive, - StartTime: DefaultStartTime, - SetWeightNonce: DefaultSetWeightNonce, - Weight: value.Weight, - BLSPublicKey: value.PublicKey, - } - } - return output, nil -} diff --git a/plugin/evm/service.go b/plugin/evm/service.go index 4f1a5c72fe..4a2e59951a 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -4,71 +4,46 @@ package evm import ( - "context" - "math/big" + "net/http" "time" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/subnet-evm/plugin/evm/validators" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" ) -// SnowmanAPI introduces snowman specific functionality to the evm -type SnowmanAPI struct{ vm *VM } - -// GetAcceptedFrontReply defines the reply that will be sent from the -// GetAcceptedFront API call -type GetAcceptedFrontReply struct { - Hash common.Hash `json:"hash"` - Number *big.Int `json:"number"` -} - -// GetAcceptedFront returns the last accepted block's hash and height -func (api *SnowmanAPI) GetAcceptedFront(ctx context.Context) (*GetAcceptedFrontReply, error) { - blk := api.vm.blockChain.LastConsensusAcceptedBlock() - return &GetAcceptedFrontReply{ - Hash: blk.Hash(), - Number: blk.Number(), - }, nil +type ValidatorsAPI struct { + vm *VM } -// IssueBlock to the chain -func (api 
*SnowmanAPI) IssueBlock(ctx context.Context) error { - log.Info("Issuing a new block") - api.vm.builder.signalTxsReady() - return nil +type GetCurrentValidatorsRequest struct { + NodeIDs []ids.NodeID `json:"nodeIDs"` } -type ValidatorsAPI struct { - vm *VM +type GetCurrentValidatorsResponse struct { + Validators []currentValidatorResponse `json:"validators"` } -type GetCurrentValidatorResponse struct { - ValidationID ids.ID `json:"validationID"` - NodeID ids.NodeID `json:"nodeID"` - StartTime time.Time `json:"startTime"` - IsActive bool `json:"isActive"` - IsConnected bool `json:"isConnected"` - UptimePercentage *json.Float32 `json:"uptimePercentage"` - Uptime time.Duration `json:"uptime"` +type currentValidatorResponse struct { + ValidationID ids.ID `json:"validationID"` + NodeID ids.NodeID `json:"nodeID"` + StartTime time.Time `json:"startTime"` + IsActive bool `json:"isActive"` + IsConnected bool `json:"isConnected"` + Uptime time.Duration `json:"uptime"` } // GetUptime returns the uptime of the node -func (api *ValidatorsAPI) GetCurrentValidators(ctx context.Context, nodeIDsArg *[]ids.NodeID) ([]GetCurrentValidatorResponse, error) { - api.vm.ctx.Lock.Lock() - defer api.vm.ctx.Lock.Unlock() - var nodeIDs set.Set[ids.NodeID] - if nodeIDsArg == nil || len(*nodeIDsArg) == 0 { +func (api *ValidatorsAPI) GetCurrentValidators(_ *http.Request, args *GetCurrentValidatorsRequest, reply *GetCurrentValidatorsResponse) error { + api.vm.ctx.Lock.RLock() + defer api.vm.ctx.Lock.RUnlock() + + nodeIDs := set.Of(args.NodeIDs...) + if nodeIDs.Len() == 0 { nodeIDs = api.vm.validatorState.GetValidatorIDs() - } else { - nodeIDs = set.Of(*nodeIDsArg...) } - responses := make([]GetCurrentValidatorResponse, 0, nodeIDs.Len()) + reply.Validators = make([]currentValidatorResponse, 0, nodeIDs.Len()) for _, nodeID := range nodeIDs.List() { validator, err := api.vm.validatorState.GetValidator(nodeID) @@ -76,38 +51,24 @@ func (api *ValidatorsAPI) GetCurrentValidators(ctx context.Context, nodeIDsArg * case err == database.ErrNotFound: continue case err != nil: - return nil, err - } - uptimePerc, err := api.getAPIUptimePerc(validator) - if err != nil { - return nil, err + return err } + isConnected := api.vm.uptimeManager.IsConnected(nodeID) uptime, _, err := api.vm.uptimeManager.CalculateUptime(nodeID) if err != nil { - return nil, err + return err } - responses = append(responses, GetCurrentValidatorResponse{ - ValidationID: validator.ValidationID, - NodeID: nodeID, - StartTime: validator.StartTime, - IsActive: validator.IsActive, - UptimePercentage: uptimePerc, - IsConnected: isConnected, - Uptime: time.Duration(uptime.Seconds()), + reply.Validators = append(reply.Validators, currentValidatorResponse{ + ValidationID: validator.ValidationID, + NodeID: nodeID, + StartTime: validator.StartTime, + IsActive: validator.IsActive, + IsConnected: isConnected, + Uptime: time.Duration(uptime.Seconds()), }) } - return responses, nil -} - -func (api *ValidatorsAPI) getAPIUptimePerc(validator *validators.ValidatorOutput) (*json.Float32, error) { - rawUptime, err := api.vm.uptimeManager.CalculateUptimePercentFrom(validator.NodeID, validator.StartTime) - if err != nil { - return nil, err - } - // Transform this to a percentage (0-100) - uptime := json.Float32(rawUptime * 100) - return &uptime, nil + return nil } diff --git a/plugin/evm/validators/state.go b/plugin/evm/validators/state.go index c182561209..3ea7711bae 100644 --- a/plugin/evm/validators/state.go +++ b/plugin/evm/validators/state.go @@ -259,7 +259,7 @@ func (s *state) 
GetValidator(nodeID ids.NodeID) (*ValidatorOutput, error) { return &ValidatorOutput{ ValidationID: data.validationID, NodeID: data.NodeID, - StartTime: data.startTime, + StartTime: data.getStartTime(), IsActive: data.IsActive, }, nil } diff --git a/plugin/evm/validators/state_test.go b/plugin/evm/validators/state_test.go index 01bb3dc5c0..5db522b425 100644 --- a/plugin/evm/validators/state_test.go +++ b/plugin/evm/validators/state_test.go @@ -255,7 +255,7 @@ func TestStateListener(t *testing.T) { // test case: check initial trigger when registering listener // add new validator - state.AddNewValidator(expectedvID, expectedNodeID, uint64(expectedStartTime.Unix()), true) + state.AddValidator(expectedvID, expectedNodeID, uint64(expectedStartTime.Unix()), true) newListener := &testCallbackListener{ t: t, onAdd: func(vID ids.ID, nodeID ids.NodeID, startTime uint64, isActive bool) { diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 110729d0d9..0c17519d3b 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -76,6 +76,7 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" avalancheUptime "github.com/ava-labs/avalanchego/snow/uptime" + avalancheValidators "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -129,6 +130,7 @@ const ( adminEndpoint = "/admin" ethRPCEndpoint = "/rpc" ethWSEndpoint = "/ws" + validatorsEndpoint = "/validators" ethTxGossipNamespace = "eth_tx_gossip" ) @@ -246,9 +248,7 @@ type VM struct { uptimeManager uptime.PausableManager - // TODO/: remove this after implementing GetCurrentValidatorSet - mockedPChainValidatorState MockedValidatorState - validatorState validators.State + validatorState validators.State } // Initialize implements the snowman.ChainVM interface @@ -481,7 +481,6 @@ func (vm *VM) Initialize( vm.Network = peer.NewNetwork(p2pNetwork, appSender, vm.networkCodec, chainCtx.NodeID, vm.config.MaxOutboundActiveRequests) vm.client = peer.NewNetworkClient(vm.Network) - vm.mockedPChainValidatorState = NewMockValidatorState(vm.ctx.ValidatorState) validatorsDB := prefixdb.New(validatorsDBPrefix, db) vm.validatorState, err = validators.NewState(validatorsDB) if err != nil { @@ -1067,9 +1066,11 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { } if vm.config.ValidatorsAPIEnabled { - if err := handler.RegisterName("validators", &ValidatorsAPI{vm}); err != nil { - return nil, err + validatorsAPI, err := newHandler("validators", &ValidatorsAPI{vm}) + if err != nil { + return nil, fmt.Errorf("failed to register service for admin API due to %w", err) } + apis[validatorsEndpoint] = validatorsAPI enabledAPIs = append(enabledAPIs, "validators") } @@ -1278,11 +1279,13 @@ func (vm *VM) performValidatorUpdate(ctx context.Context) error { now := time.Now() log.Debug("performing validator update") // get current validator set - currentValidatorSet, err := vm.mockedPChainValidatorState.GetCurrentValidatorSet(ctx, vm.ctx.SubnetID) + currentValidatorSet, _, err := vm.ctx.ValidatorState.GetCurrentValidatorSet(ctx, vm.ctx.SubnetID) if err != nil { return fmt.Errorf("failed to get current validator set: %w", err) } + log.Info("updating validators", "validatorSet", currentValidatorSet) + // load the current 
validator set into the validator state if err := loadValidators(vm.validatorState, currentValidatorSet); err != nil { return fmt.Errorf("failed to load current validators: %w", err) @@ -1299,7 +1302,7 @@ func (vm *VM) performValidatorUpdate(ctx context.Context) error { // TODO: cache the last updated height and then load if needed // loadValidators loads the [validators] into the validator state [validatorState] -func loadValidators(validatorState validators.State, validators map[ids.ID]*MockValidatorOutput) error { +func loadValidators(validatorState validators.State, validators map[ids.ID]*avalancheValidators.GetCurrentValidatorOutput) error { currentValidationIDs := validatorState.GetValidationIDs() // first check if we need to delete any existing validators for vID := range currentValidationIDs { @@ -1324,7 +1327,7 @@ func loadValidators(validatorState validators.State, validators map[ids.ID]*Mock } } } else { - if err := validatorState.AddNewValidator(vID, vdr.NodeID, vdr.StartTime, vdr.IsActive); err != nil { + if err := validatorState.AddValidator(vID, vdr.NodeID, vdr.StartTime, vdr.IsActive); err != nil { return err } } diff --git a/plugin/evm/vm_validators_state_test.go b/plugin/evm/vm_validators_state_test.go index 72ffcde590..5a402441b8 100644 --- a/plugin/evm/vm_validators_state_test.go +++ b/plugin/evm/vm_validators_state_test.go @@ -32,31 +32,35 @@ func TestValidatorState(t *testing.T) { ctx, dbManager, genesisBytes, issuer, _ := setupGenesis(t, string(genesisJSON)) appSender := &enginetest.Sender{T: t} appSender.CantSendAppGossip = true - testValidatorIDs := []ids.NodeID{ + testNodeIDs := []ids.NodeID{ ids.GenerateTestNodeID(), ids.GenerateTestNodeID(), ids.GenerateTestNodeID(), } + testValidationIDs := []ids.ID{ + ids.GenerateTestID(), + ids.GenerateTestID(), + ids.GenerateTestID(), + } ctx.ValidatorState = &validatorstest.State{ - GetCurrentHeightF: func(ctx context.Context) (uint64, error) { return 0, nil }, - GetValidatorSetF: func(ctx context.Context, height uint64, subnetID ids.ID) (map[ids.NodeID]*avagoValidators.GetValidatorOutput, error) { - return map[ids.NodeID]*avagoValidators.GetValidatorOutput{ - testValidatorIDs[0]: { - NodeID: testValidatorIDs[0], + GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagoValidators.GetCurrentValidatorOutput, uint64, error) { + return map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], PublicKey: nil, Weight: 1, }, - testValidatorIDs[1]: { - NodeID: testValidatorIDs[1], + testValidationIDs[1]: { + NodeID: testNodeIDs[1], PublicKey: nil, Weight: 1, }, - testValidatorIDs[2]: { - NodeID: testValidatorIDs[2], + testValidationIDs[2]: { + NodeID: testNodeIDs[2], PublicKey: nil, Weight: 1, }, - }, nil + }, 0, nil }, } appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } @@ -75,7 +79,7 @@ func TestValidatorState(t *testing.T) { // Test case 1: state should not be populated until bootstrapped require.Equal(0, vm.validatorState.GetValidationIDs().Len()) - _, _, err = vm.uptimeManager.CalculateUptime(testValidatorIDs[0]) + _, _, err = vm.uptimeManager.CalculateUptime(testNodeIDs[0]) require.ErrorIs(database.ErrNotFound, err) require.False(vm.uptimeManager.StartedTracking()) @@ -83,7 +87,7 @@ func TestValidatorState(t *testing.T) { require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) require.NoError(vm.SetState(context.Background(), snow.NormalOp)) require.Equal(3, 
vm.validatorState.GetValidationIDs().Len()) - _, _, err = vm.uptimeManager.CalculateUptime(testValidatorIDs[0]) + _, _, err = vm.uptimeManager.CalculateUptime(testNodeIDs[0]) require.NoError(err) require.True(vm.uptimeManager.StartedTracking()) @@ -108,40 +112,40 @@ func TestValidatorState(t *testing.T) { ) require.NoError(err, "error initializing GenesisVM") require.Equal(3, vm.validatorState.GetValidationIDs().Len()) - _, _, err = vm.uptimeManager.CalculateUptime(testValidatorIDs[0]) + _, _, err = vm.uptimeManager.CalculateUptime(testNodeIDs[0]) require.NoError(err) require.False(vm.uptimeManager.StartedTracking()) // Test case 4: new validators should be added to the state - newValidatorID := ids.GenerateTestNodeID() + newValidationID := ids.GenerateTestID() + newNodeID := ids.GenerateTestNodeID() testState := &validatorstest.State{ - GetCurrentHeightF: func(ctx context.Context) (uint64, error) { return 0, nil }, - GetValidatorSetF: func(ctx context.Context, height uint64, subnetID ids.ID) (map[ids.NodeID]*avagoValidators.GetValidatorOutput, error) { - return map[ids.NodeID]*avagoValidators.GetValidatorOutput{ - testValidatorIDs[0]: { - NodeID: testValidatorIDs[0], + GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagoValidators.GetCurrentValidatorOutput, uint64, error) { + return map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], PublicKey: nil, Weight: 1, }, - testValidatorIDs[1]: { - NodeID: testValidatorIDs[1], + testValidationIDs[1]: { + NodeID: testNodeIDs[1], PublicKey: nil, Weight: 1, }, - testValidatorIDs[2]: { - NodeID: testValidatorIDs[2], + testValidationIDs[2]: { + NodeID: testNodeIDs[2], PublicKey: nil, Weight: 1, }, - newValidatorID: { - NodeID: newValidatorID, + newValidationID: { + NodeID: newNodeID, PublicKey: nil, Weight: 1, }, - }, nil + }, 0, nil }, } - vm.mockedPChainValidatorState = NewMockValidatorState(testState) + vm.ctx.ValidatorState = testState // set VM as bootstrapped require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) require.NoError(vm.SetState(context.Background(), snow.NormalOp)) @@ -149,9 +153,9 @@ func TestValidatorState(t *testing.T) { // new validator should be added to the state eventually after validatorsLoadFrequency require.EventuallyWithT(func(c *assert.CollectT) { assert.Equal(c, 4, vm.validatorState.GetValidatorIDs().Len()) - newValidator, err := vm.validatorState.GetValidator(newValidatorID) + newValidator, err := vm.validatorState.GetValidator(newNodeID) assert.NoError(c, err) - assert.Equal(c, newValidatorID, newValidator.NodeID) + assert.Equal(c, newNodeID, newValidator.NodeID) }, validatorsLoadFrequency*2, validatorsLoadFrequency/2) } @@ -168,20 +172,20 @@ func TestLoadNewValidators(t *testing.T) { } tests := []struct { name string - initialValidators map[ids.ID]*MockValidatorOutput - newValidators map[ids.ID]*MockValidatorOutput + initialValidators map[ids.ID]*avagoValidators.GetCurrentValidatorOutput + newValidators map[ids.ID]*avagoValidators.GetCurrentValidatorOutput registerMockListenerCalls func(*validators.MockStateCallbackListener) }{ { name: "before empty/after empty", - initialValidators: map[ids.ID]*MockValidatorOutput{}, - newValidators: map[ids.ID]*MockValidatorOutput{}, + initialValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{}, + newValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{}, registerMockListenerCalls: func(*validators.MockStateCallbackListener) {}, }, { name: "before 
empty/after one", - initialValidators: map[ids.ID]*MockValidatorOutput{}, - newValidators: map[ids.ID]*MockValidatorOutput{ + initialValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{}, + newValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ testValidationIDs[0]: { NodeID: testNodeIDs[0], IsActive: true, @@ -194,14 +198,14 @@ func TestLoadNewValidators(t *testing.T) { }, { name: "before one/after empty", - initialValidators: map[ids.ID]*MockValidatorOutput{ + initialValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ testValidationIDs[0]: { NodeID: testNodeIDs[0], IsActive: true, StartTime: 0, }, }, - newValidators: map[ids.ID]*MockValidatorOutput{}, + newValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{}, registerMockListenerCalls: func(mock *validators.MockStateCallbackListener) { // initial validator will trigger first mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) @@ -211,14 +215,14 @@ func TestLoadNewValidators(t *testing.T) { }, { name: "no change", - initialValidators: map[ids.ID]*MockValidatorOutput{ + initialValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ testValidationIDs[0]: { NodeID: testNodeIDs[0], IsActive: true, StartTime: 0, }, }, - newValidators: map[ids.ID]*MockValidatorOutput{ + newValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ testValidationIDs[0]: { NodeID: testNodeIDs[0], IsActive: true, @@ -231,14 +235,14 @@ func TestLoadNewValidators(t *testing.T) { }, { name: "status change and new one", - initialValidators: map[ids.ID]*MockValidatorOutput{ + initialValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ testValidationIDs[0]: { NodeID: testNodeIDs[0], IsActive: true, StartTime: 0, }, }, - newValidators: map[ids.ID]*MockValidatorOutput{ + newValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ testValidationIDs[0]: { NodeID: testNodeIDs[0], IsActive: false, @@ -261,14 +265,14 @@ func TestLoadNewValidators(t *testing.T) { }, { name: "renew validation ID", - initialValidators: map[ids.ID]*MockValidatorOutput{ + initialValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ testValidationIDs[0]: { NodeID: testNodeIDs[0], IsActive: true, StartTime: 0, }, }, - newValidators: map[ids.ID]*MockValidatorOutput{ + newValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ testValidationIDs[1]: { NodeID: testNodeIDs[0], IsActive: true, @@ -286,14 +290,14 @@ func TestLoadNewValidators(t *testing.T) { }, { name: "renew node ID", - initialValidators: map[ids.ID]*MockValidatorOutput{ + initialValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ testValidationIDs[0]: { NodeID: testNodeIDs[0], IsActive: true, StartTime: 0, }, }, - newValidators: map[ids.ID]*MockValidatorOutput{ + newValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ testValidationIDs[0]: { NodeID: testNodeIDs[1], IsActive: true, @@ -317,7 +321,7 @@ func TestLoadNewValidators(t *testing.T) { // set initial validators for vID, validator := range test.initialValidators { - err := validatorState.AddNewValidator(vID, validator.NodeID, validator.StartTime, validator.IsActive) + err := validatorState.AddValidator(vID, validator.NodeID, validator.StartTime, validator.IsActive) require.NoError(err) } // enable mock listener From df6ad02c1e142c1a8baca6fcf2e43fb6d78d70b6 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 20 Sep 2024 11:07:23 -0400 Subject: [PATCH 36/98] use branch commit for ava 
version --- scripts/versions.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/versions.sh b/scripts/versions.sh index e76d3bbad6..38abad91f9 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -4,7 +4,7 @@ # shellcheck disable=SC2034 # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'v1.11.11'} +AVALANCHE_VERSION=${AVALANCHE_VERSION:-'17539503'} GINKGO_VERSION=${GINKGO_VERSION:-'v2.2.0'} # This won't be used, but it's here to make code syncs easier From 738003f906f2a1da8f6b49b6032a42d62ab0cdbd Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 20 Sep 2024 12:16:04 -0400 Subject: [PATCH 37/98] update e2e ava version --- scripts/versions.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/versions.sh b/scripts/versions.sh index e76d3bbad6..097d9559ba 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -4,7 +4,7 @@ # shellcheck disable=SC2034 # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'v1.11.11'} +AVALANCHE_VERSION=${AVALANCHE_VERSION:-'3adf6d4c'} GINKGO_VERSION=${GINKGO_VERSION:-'v2.2.0'} # This won't be used, but it's here to make code syncs easier From 29a501dab6dad07baab924efcb3077af4d7d0e7c Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 20 Sep 2024 12:29:29 -0400 Subject: [PATCH 38/98] update avago dep --- go.mod | 2 +- go.sum | 2 ++ scripts/versions.sh | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 8996f62174..59c9718cd4 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21.12 require ( github.com/VictoriaMetrics/fastcache v1.12.1 github.com/antithesishq/antithesis-sdk-go v0.3.8 - github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240920144520-3adf6d4c0f62 + github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240920150211-07af6b2fbe17 github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index ad8bc233a2..9c2917c9be 100644 --- a/go.sum +++ b/go.sum @@ -62,6 +62,8 @@ github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240919204453-a754e44c1795 h1:z github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240919204453-a754e44c1795/go.mod h1:YzHJbHAJOlRLwG1pxWk4uAI7nvV4cxpgQL1FSAx/H4Y= github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240920144520-3adf6d4c0f62 h1:H/8gjHFcYDC02oSDehRQW3s89cIV6aM2u7WfxMuxlNU= github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240920144520-3adf6d4c0f62/go.mod h1:YzHJbHAJOlRLwG1pxWk4uAI7nvV4cxpgQL1FSAx/H4Y= +github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240920150211-07af6b2fbe17 h1:nc/U63uqdIikeiNexGe0PIOwSOWcRqmuOWuBxqJ7B3Y= +github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240920150211-07af6b2fbe17/go.mod h1:YzHJbHAJOlRLwG1pxWk4uAI7nvV4cxpgQL1FSAx/H4Y= github.com/ava-labs/coreth v0.13.8 h1:f14X3KgwHl9LwzfxlN6S4bbn5VA2rhEsNnHaRLSTo/8= github.com/ava-labs/coreth v0.13.8/go.mod h1:t3BSv/eQv0AlDPMfEDCMMoD/jq1RkUsbFzQAFg5qBcE= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= diff --git a/scripts/versions.sh b/scripts/versions.sh index 097d9559ba..739e0d2b75 100644 --- 
a/scripts/versions.sh +++ b/scripts/versions.sh @@ -4,7 +4,7 @@ # shellcheck disable=SC2034 # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'3adf6d4c'} +AVALANCHE_VERSION=${AVALANCHE_VERSION:-'07af6b2f'} GINKGO_VERSION=${GINKGO_VERSION:-'v2.2.0'} # This won't be used, but it's here to make code syncs easier From 71c7de0fb743781019a6faaf8e186a22ef4b5fd9 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 20 Sep 2024 12:34:21 -0400 Subject: [PATCH 39/98] remove extra line... --- scripts/mocks.mockgen.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/scripts/mocks.mockgen.txt b/scripts/mocks.mockgen.txt index 3fef0f4a3a..fcdbe2978a 100644 --- a/scripts/mocks.mockgen.txt +++ b/scripts/mocks.mockgen.txt @@ -1,4 +1,3 @@ github.com/ava-labs/subnet-evm/precompile/precompileconfig=Predicater,Config,ChainConfig,Accepter=precompile/precompileconfig/mocks.go github.com/ava-labs/subnet-evm/precompile/contract=BlockContext,AccessibleState,StateDB=precompile/contract/mocks.go -github.com/ava-labs/subnet-evm/plugin/evm/validators=StateCallbackListener=plugin/evm/validators/mocks.go - +github.com/ava-labs/subnet-evm/plugin/evm/validators=StateCallbackListener=plugin/evm/validators/mocks.go \ No newline at end of file From b9d6336e86f3fe246fbe37354fce8e110e0b9a13 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 26 Sep 2024 19:14:00 +0300 Subject: [PATCH 40/98] export struct --- plugin/evm/service.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugin/evm/service.go b/plugin/evm/service.go index 4a2e59951a..a7b0339599 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -21,10 +21,10 @@ type GetCurrentValidatorsRequest struct { } type GetCurrentValidatorsResponse struct { - Validators []currentValidatorResponse `json:"validators"` + Validators []CurrentValidator `json:"validators"` } -type currentValidatorResponse struct { +type CurrentValidator struct { ValidationID ids.ID `json:"validationID"` NodeID ids.NodeID `json:"nodeID"` StartTime time.Time `json:"startTime"` @@ -43,7 +43,7 @@ func (api *ValidatorsAPI) GetCurrentValidators(_ *http.Request, args *GetCurrent nodeIDs = api.vm.validatorState.GetValidatorIDs() } - reply.Validators = make([]currentValidatorResponse, 0, nodeIDs.Len()) + reply.Validators = make([]CurrentValidator, 0, nodeIDs.Len()) for _, nodeID := range nodeIDs.List() { validator, err := api.vm.validatorState.GetValidator(nodeID) @@ -61,7 +61,7 @@ func (api *ValidatorsAPI) GetCurrentValidators(_ *http.Request, args *GetCurrent return err } - reply.Validators = append(reply.Validators, currentValidatorResponse{ + reply.Validators = append(reply.Validators, CurrentValidator{ ValidationID: validator.ValidationID, NodeID: nodeID, StartTime: validator.StartTime, From beea962863a745c5e66922ada5e12ecc1fd5a9e1 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 9 Oct 2024 16:46:10 +0300 Subject: [PATCH 41/98] implement acp118 signer and verifier --- go.mod | 2 +- go.sum | 4 +- plugin/evm/vm.go | 5 +- warp/backend.go | 110 ++++++++---- warp/handlers/signature_request_p2p.go | 151 ----------------- warp/handlers/signature_request_test.go | 2 - warp/handlers/signer_verifier_p2p.go | 156 ++++++++++++++++++ ...2p_test.go => signer_verifier_p2p_test.go} | 39 +++-- warp/handlers/stats.go | 44 ++--- 9 files changed, 292 insertions(+), 221 deletions(-) delete mode 100644 warp/handlers/signature_request_p2p.go 
create mode 100644 warp/handlers/signer_verifier_p2p.go rename warp/handlers/{signature_request_p2p_test.go => signer_verifier_p2p_test.go} (82%) diff --git a/go.mod b/go.mod index f15a30e537..08265830d2 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22.8 require ( github.com/VictoriaMetrics/fastcache v1.12.1 github.com/antithesishq/antithesis-sdk-go v0.3.8 - github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20241003220930-43d5b435a644 + github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20241004185122-5285749cc752 github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 089fdbe3f5..1693074e83 100644 --- a/go.sum +++ b/go.sum @@ -58,8 +58,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax github.com/antithesishq/antithesis-sdk-go v0.3.8 h1:OvGoHxIcOXFJLyn9IJQ5DzByZ3YVAWNBc394ObzDRb8= github.com/antithesishq/antithesis-sdk-go v0.3.8/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20241003220930-43d5b435a644 h1:F3SVwl0bmatEgtL7FeREoLOPGZkNCID6gQk94WmeCgY= -github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20241003220930-43d5b435a644/go.mod h1:qSHmog3wMVjo/ruIAQo0ppXAilyni07NIu5K88RyhWE= +github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20241004185122-5285749cc752 h1:pQh5HA1sf31baNmN6RZpI02JOLmv7o7TcBY4VpuOSMI= +github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20241004185122-5285749cc752/go.mod h1:qSHmog3wMVjo/ruIAQo0ppXAilyni07NIu5K88RyhWE= github.com/ava-labs/coreth v0.13.8 h1:f14X3KgwHl9LwzfxlN6S4bbn5VA2rhEsNnHaRLSTo/8= github.com/ava-labs/coreth v0.13.8/go.mod h1:t3BSv/eQv0AlDPMfEDCMMoD/jq1RkUsbFzQAFg5qBcE= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 376a8a12c7..b874b6e53d 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -70,6 +70,7 @@ import ( "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p/acp118" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" @@ -624,7 +625,9 @@ func (vm *VM) initializeHandlers() { }) // Add p2p warp message warpHandler - warpHandler := handlers.NewSignatureRequestHandlerP2P(vm.warpBackend, vm.networkCodec) + signerVerifier := handlers.NewSignerVerifier(vm.warpBackend, vm.networkCodec) + // TODO: consider chancing acp118 handler to take a single Signer interface + warpHandler := acp118.NewHandler(signerVerifier, signerVerifier) vm.Network.AddHandler(p2p.SignatureRequestHandlerID, warpHandler) vm.setAppRequestHandlers() diff --git a/warp/backend.go b/warp/backend.go index 360161a336..b67be9ac70 100644 --- a/warp/backend.go +++ b/warp/backend.go @@ 
-37,10 +37,10 @@ type Backend interface { // AddMessage signs [unsignedMessage] and adds it to the warp backend database AddMessage(unsignedMessage *avalancheWarp.UnsignedMessage) error - // GetMessageSignature returns the signature of the requested message. + // GetMessageSignature validates the message and returns the signature of the requested message. GetMessageSignature(message *avalancheWarp.UnsignedMessage) ([bls.SignatureLen]byte, error) - // GetBlockSignature returns the signature of the requested message hash. + // GetBlockSignature validates blockID and returns the signature of the requested message hash. GetBlockSignature(blockID ids.ID) ([bls.SignatureLen]byte, error) // GetMessage retrieves the [unsignedMessage] from the warp backend database if available @@ -48,6 +48,18 @@ type Backend interface { // to unsignedMessage (and this method can be removed). GetMessage(messageHash ids.ID) (*avalancheWarp.UnsignedMessage, error) + // ValidateMessage validates the [unsignedMessage] and returns an error if the message is invalid. + ValidateMessage(unsignedMessage *avalancheWarp.UnsignedMessage) error + + // ValidateBlockMessage validates the block message with the given [blockID] and returns an error if the message is invalid. + ValidateBlockMessage(blockID ids.ID) error + + // SignMessage signs the [unsignedMessage] and returns the signature. + SignMessage(unsignedMessage *avalancheWarp.UnsignedMessage) ([bls.SignatureLen]byte, error) + + // SignBlock signs the block message with the given [blockID] and returns the signature. + SignBlock(blockID ids.ID) ([bls.SignatureLen]byte, error) + // Clear clears the entire db Clear() error } @@ -154,21 +166,27 @@ func (b *backend) GetMessageSignature(unsignedMessage *avalancheWarp.UnsignedMes if err := b.ValidateMessage(unsignedMessage); err != nil { return [bls.SignatureLen]byte{}, fmt.Errorf("failed to validate warp message: %w", err) } + return b.signMessage(unsignedMessage) +} - var signature [bls.SignatureLen]byte - sig, err := b.warpSigner.Sign(unsignedMessage) - if err != nil { - return [bls.SignatureLen]byte{}, fmt.Errorf("failed to sign warp message: %w", err) +func (b *backend) SignMessage(unsignedMessage *avalancheWarp.UnsignedMessage) ([bls.SignatureLen]byte, error) { + messageID := unsignedMessage.ID() + + if sig, ok := b.messageSignatureCache.Get(messageID); ok { + return sig, nil } - copy(signature[:], sig) - b.messageSignatureCache.Put(messageID, signature) - return signature, nil + return b.signMessage(unsignedMessage) } func (b *backend) ValidateMessage(unsignedMessage *avalancheWarp.UnsignedMessage) error { + messageID := unsignedMessage.ID() + + if _, ok := b.messageSignatureCache.Get(messageID); ok { + return nil + } // Known on-chain messages should be signed - if _, err := b.GetMessage(unsignedMessage.ID()); err == nil { + if _, err := b.GetMessage(messageID); err == nil { return nil } @@ -197,34 +215,38 @@ func (b *backend) ValidateMessage(unsignedMessage *avalancheWarp.UnsignedMessage return nil } -func (b *backend) GetBlockSignature(blockID ids.ID) ([bls.SignatureLen]byte, error) { - log.Debug("Getting block from backend", "blockID", blockID) - if sig, ok := b.blockSignatureCache.Get(blockID); ok { - return sig, nil +func (b *backend) ValidateBlockMessage(blockID ids.ID) error { + if _, ok := b.blockSignatureCache.Get(blockID); ok { + return nil } _, err := b.blockClient.GetAcceptedBlock(context.TODO(), blockID) if err != nil { - return [bls.SignatureLen]byte{}, fmt.Errorf("failed to get block %s: %w", blockID, err) 
+ return fmt.Errorf("failed to get block %s: %w", blockID, err) } - var signature [bls.SignatureLen]byte - blockHashPayload, err := payload.NewHash(blockID) - if err != nil { - return [bls.SignatureLen]byte{}, fmt.Errorf("failed to create new block hash payload: %w", err) + return nil +} + +func (b *backend) GetBlockSignature(blockID ids.ID) ([bls.SignatureLen]byte, error) { + log.Debug("Getting block from backend", "blockID", blockID) + if sig, ok := b.blockSignatureCache.Get(blockID); ok { + return sig, nil } - unsignedMessage, err := avalancheWarp.NewUnsignedMessage(b.networkID, b.sourceChainID, blockHashPayload.Bytes()) - if err != nil { - return [bls.SignatureLen]byte{}, fmt.Errorf("failed to create new unsigned warp message: %w", err) + + if err := b.ValidateBlockMessage(blockID); err != nil { + return [bls.SignatureLen]byte{}, fmt.Errorf("failed to validate block message: %w", err) } - sig, err := b.warpSigner.Sign(unsignedMessage) - if err != nil { - return [bls.SignatureLen]byte{}, fmt.Errorf("failed to sign warp message: %w", err) + + return b.signBlock(blockID) +} + +func (b *backend) SignBlock(blockID ids.ID) ([bls.SignatureLen]byte, error) { + if sig, ok := b.blockSignatureCache.Get(blockID); ok { + return sig, nil } - copy(signature[:], sig) - b.blockSignatureCache.Put(blockID, signature) - return signature, nil + return b.signBlock(blockID) } func (b *backend) GetMessage(messageID ids.ID) (*avalancheWarp.UnsignedMessage, error) { @@ -248,3 +270,35 @@ func (b *backend) GetMessage(messageID ids.ID) (*avalancheWarp.UnsignedMessage, return unsignedMessage, nil } + +func (b *backend) signMessage(unsignedMessage *avalancheWarp.UnsignedMessage) ([bls.SignatureLen]byte, error) { + sig, err := b.warpSigner.Sign(unsignedMessage) + if err != nil { + return [bls.SignatureLen]byte{}, fmt.Errorf("failed to sign warp message: %w", err) + } + + var signature [bls.SignatureLen]byte + copy(signature[:], sig) + b.messageSignatureCache.Put(unsignedMessage.ID(), signature) + return signature, nil +} + +func (b *backend) signBlock(blockID ids.ID) ([bls.SignatureLen]byte, error) { + blockHashPayload, err := payload.NewHash(blockID) + if err != nil { + return [bls.SignatureLen]byte{}, fmt.Errorf("failed to create new block hash payload: %w", err) + } + unsignedMessage, err := avalancheWarp.NewUnsignedMessage(b.networkID, b.sourceChainID, blockHashPayload.Bytes()) + if err != nil { + return [bls.SignatureLen]byte{}, fmt.Errorf("failed to create new unsigned warp message: %w", err) + } + sig, err := b.warpSigner.Sign(unsignedMessage) + if err != nil { + return [bls.SignatureLen]byte{}, fmt.Errorf("failed to sign warp message: %w", err) + } + + var signature [bls.SignatureLen]byte + copy(signature[:], sig) + b.blockSignatureCache.Put(blockID, signature) + return signature, nil +} diff --git a/warp/handlers/signature_request_p2p.go b/warp/handlers/signature_request_p2p.go deleted file mode 100644 index cb711974b2..0000000000 --- a/warp/handlers/signature_request_p2p.go +++ /dev/null @@ -1,151 +0,0 @@ -// (c) 2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package handlers - -import ( - "context" - "fmt" - "time" - - "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/network/p2p" - "github.com/ava-labs/avalanchego/proto/pb/sdk" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/crypto/bls" - avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" - "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" - "github.com/ava-labs/subnet-evm/warp" - "google.golang.org/protobuf/proto" -) - -var _ p2p.Handler = (*SignatureRequestHandlerP2P)(nil) - -const ( - ErrFailedToParse = iota - ErrFailedToGetSig - ErrFailedToMarshal -) - -type AddressedCallHandler interface { - GetMessageSignature(*avalancheWarp.UnsignedMessage) ([bls.SignatureLen]byte, error) -} - -// SignatureRequestHandlerP2P serves warp signature requests using the p2p -// framework from avalanchego. It is a peer.RequestHandler for -// message.MessageSignatureRequest. -type SignatureRequestHandlerP2P struct { - backend warp.Backend - codec codec.Manager - stats *handlerStats -} - -func NewSignatureRequestHandlerP2P(backend warp.Backend, codec codec.Manager) *SignatureRequestHandlerP2P { - return &SignatureRequestHandlerP2P{ - backend: backend, - codec: codec, - stats: newStats(), - } -} - -func (s *SignatureRequestHandlerP2P) AppRequest( - ctx context.Context, - nodeID ids.NodeID, - deadline time.Time, - requestBytes []byte, -) ([]byte, *common.AppError) { - // Per ACP-118, the requestBytes are the serialized form of - // sdk.SignatureRequest. - req := new(sdk.SignatureRequest) - if err := proto.Unmarshal(requestBytes, req); err != nil { - return nil, &common.AppError{ - Code: ErrFailedToParse, - Message: "failed to unmarshal request: " + err.Error(), - } - } - - unsignedMessage, err := avalancheWarp.ParseUnsignedMessage(req.Message) - if err != nil { - return nil, &common.AppError{ - Code: ErrFailedToParse, - Message: "failed to parse unsigned message: " + err.Error(), - } - } - parsed, err := payload.Parse(unsignedMessage.Payload) - if err != nil { - return nil, &common.AppError{ - Code: ErrFailedToParse, - Message: "failed to parse payload: " + err.Error(), - } - } - - var sig [bls.SignatureLen]byte - switch p := parsed.(type) { - case *payload.AddressedCall: - sig, err = s.GetMessageSignature(unsignedMessage) - if err != nil { - s.stats.IncMessageSignatureMiss() - } else { - s.stats.IncMessageSignatureHit() - } - case *payload.Hash: - sig, err = s.GetBlockSignature(p.Hash) - if err != nil { - s.stats.IncBlockSignatureMiss() - } else { - s.stats.IncBlockSignatureHit() - } - default: - return nil, &common.AppError{ - Code: ErrFailedToParse, - Message: fmt.Sprintf("unknown payload type: %T", p), - } - } - if err != nil { - return nil, &common.AppError{ - Code: ErrFailedToGetSig, - Message: "failed to get signature: " + err.Error(), - } - } - - // Per ACP-118, the responseBytes are the serialized form of - // sdk.SignatureResponse. 
- resp := &sdk.SignatureResponse{Signature: sig[:]} - respBytes, err := proto.Marshal(resp) - if err != nil { - return nil, &common.AppError{ - Code: ErrFailedToMarshal, - Message: "failed to marshal response: " + err.Error(), - } - } - return respBytes, nil -} - -func (s *SignatureRequestHandlerP2P) GetMessageSignature(message *avalancheWarp.UnsignedMessage) ([bls.SignatureLen]byte, error) { - startTime := time.Now() - s.stats.IncMessageSignatureRequest() - - // Always report signature request time - defer func() { - s.stats.UpdateMessageSignatureRequestTime(time.Since(startTime)) - }() - - return s.backend.GetMessageSignature(message) -} - -func (s *SignatureRequestHandlerP2P) GetBlockSignature(blockID ids.ID) ([bls.SignatureLen]byte, error) { - startTime := time.Now() - s.stats.IncBlockSignatureRequest() - - // Always report signature request time - defer func() { - s.stats.UpdateBlockSignatureRequestTime(time.Since(startTime)) - }() - - return s.backend.GetBlockSignature(blockID) -} - -func (s *SignatureRequestHandlerP2P) AppGossip( - ctx context.Context, nodeID ids.NodeID, gossipBytes []byte) { -} diff --git a/warp/handlers/signature_request_test.go b/warp/handlers/signature_request_test.go index 1f699324cc..455ac268e2 100644 --- a/warp/handlers/signature_request_test.go +++ b/warp/handlers/signature_request_test.go @@ -101,7 +101,6 @@ func TestMessageSignatureHandler(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { handler := NewSignatureRequestHandler(backend, message.Codec) - handler.stats.Clear() request, expectedResponse := test.setup() responseBytes, err := handler.OnMessageSignatureRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) @@ -188,7 +187,6 @@ func TestBlockSignatureHandler(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { handler := NewSignatureRequestHandler(backend, message.Codec) - handler.stats.Clear() request, expectedResponse := test.setup() responseBytes, err := handler.OnBlockSignatureRequest(context.Background(), ids.GenerateTestNodeID(), 1, request) diff --git a/warp/handlers/signer_verifier_p2p.go b/warp/handlers/signer_verifier_p2p.go new file mode 100644 index 0000000000..db24fe9561 --- /dev/null +++ b/warp/handlers/signer_verifier_p2p.go @@ -0,0 +1,156 @@ +// (c) 2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package handlers + +import ( + "context" + "fmt" + "time" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p/acp118" + "github.com/ava-labs/avalanchego/snow/engine/common" + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ava-labs/subnet-evm/warp" +) + +var ( + _ avalancheWarp.Signer = (*p2pSignerVerifier)(nil) + _ acp118.Verifier = (*p2pSignerVerifier)(nil) +) + +const ( + ParseErrCode = iota + GetSigErrCode + MarshalErrCode + ValidateErrCode +) + +var ( + errUnknownPayloadType = fmt.Errorf("unknown payload type") + errFailedToParse = fmt.Errorf("failed to parse payload") + errFailedToGetSig = fmt.Errorf("failed to get signature") +) + +type SignerVerifier interface { + acp118.Verifier + avalancheWarp.Signer +} + +// p2pSignerVerifier serves warp signature requests using the p2p +// framework from avalanchego. 
It is a peer.RequestHandler for +// message.MessageSignatureRequest. +type p2pSignerVerifier struct { + backend warp.Backend + codec codec.Manager + stats *handlerStats +} + +func NewSignerVerifier(backend warp.Backend, codec codec.Manager) SignerVerifier { + return &p2pSignerVerifier{ + backend: backend, + codec: codec, + stats: newStats(), + } +} + +func (s *p2pSignerVerifier) Verify(_ context.Context, unsignedMessage *avalancheWarp.UnsignedMessage, _ []byte) *common.AppError { + parsed, err := payload.Parse(unsignedMessage.Payload) + if err != nil { + return &common.AppError{ + Code: ParseErrCode, + Message: "failed to parse payload: " + err.Error(), + } + } + + switch p := parsed.(type) { + case *payload.AddressedCall: + err = s.backend.ValidateMessage(unsignedMessage) + if err != nil { + s.stats.IncMessageSignatureValidationFail() + return &common.AppError{ + Code: ValidateErrCode, + Message: "failed to validate message: " + err.Error(), + } + } + case *payload.Hash: + err = s.backend.ValidateBlockMessage(p.Hash) + if err != nil { + s.stats.IncBlockSignatureValidationFail() + return &common.AppError{ + Code: ValidateErrCode, + Message: "failed to validate block message: " + err.Error(), + } + } + default: + return &common.AppError{ + Code: ParseErrCode, + Message: fmt.Sprintf("unknown payload type: %T", p), + } + } + return nil +} + +func (s *p2pSignerVerifier) Sign(unsignedMessage *avalancheWarp.UnsignedMessage) ([]byte, error) { + parsed, err := payload.Parse(unsignedMessage.Payload) + if err != nil { + return nil, fmt.Errorf("%w: %s", errFailedToParse, err.Error()) + } + + var sig []byte + switch p := parsed.(type) { + case *payload.AddressedCall: + sig, err = s.GetMessageSignature(unsignedMessage) + if err != nil { + s.stats.IncMessageSignatureMiss() + } else { + s.stats.IncMessageSignatureHit() + } + case *payload.Hash: + sig, err = s.GetBlockSignature(p.Hash) + if err != nil { + s.stats.IncBlockSignatureMiss() + } else { + s.stats.IncBlockSignatureHit() + } + default: + return nil, fmt.Errorf("%w: %T", errUnknownPayloadType, p) + } + + if err != nil { + return nil, fmt.Errorf("%w: %s", errFailedToGetSig, err.Error()) + } + + return sig, nil +} + +func (s *p2pSignerVerifier) GetMessageSignature(message *avalancheWarp.UnsignedMessage) ([]byte, error) { + startTime := time.Now() + s.stats.IncMessageSignatureRequest() + + // Always report signature request time + defer func() { + s.stats.UpdateMessageSignatureRequestTime(time.Since(startTime)) + }() + + // TODO: consider changing backend to return []byte + sig, err := s.backend.GetMessageSignature(message) + return sig[:], err +} + +func (s *p2pSignerVerifier) GetBlockSignature(blockID ids.ID) ([]byte, error) { + startTime := time.Now() + s.stats.IncBlockSignatureRequest() + + // Always report signature request time + defer func() { + s.stats.UpdateBlockSignatureRequestTime(time.Since(startTime)) + }() + + // TODO: consider changing backend to return []byte + sig, err := s.backend.GetBlockSignature(blockID) + return sig[:], err +} diff --git a/warp/handlers/signature_request_p2p_test.go b/warp/handlers/signer_verifier_p2p_test.go similarity index 82% rename from warp/handlers/signature_request_p2p_test.go rename to warp/handlers/signer_verifier_p2p_test.go index 3104fe59b3..76eba68317 100644 --- a/warp/handlers/signature_request_p2p_test.go +++ b/warp/handlers/signer_verifier_p2p_test.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" 
+ "github.com/ava-labs/avalanchego/network/p2p/acp118" "github.com/ava-labs/avalanchego/proto/pb/sdk" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/crypto/bls" @@ -23,7 +24,7 @@ import ( "google.golang.org/protobuf/proto" ) -func TestMessageSignatureHandlerP2P(t *testing.T) { +func TestMessageSignatures(t *testing.T) { database := memdb.New() snowCtx := utils.TestSnowContext() blsSecretKey, err := bls.NewSecretKey() @@ -64,9 +65,11 @@ func TestMessageSignatureHandlerP2P(t *testing.T) { }, verifyStats: func(t *testing.T, stats *handlerStats) { require.EqualValues(t, 1, stats.messageSignatureRequest.Snapshot().Count()) + require.EqualValues(t, 0, stats.messageSignatureValidationFail.Snapshot().Count()) require.EqualValues(t, 1, stats.messageSignatureHit.Snapshot().Count()) require.EqualValues(t, 0, stats.messageSignatureMiss.Snapshot().Count()) require.EqualValues(t, 0, stats.blockSignatureRequest.Snapshot().Count()) + require.EqualValues(t, 0, stats.blockSignatureValidationFail.Snapshot().Count()) require.EqualValues(t, 0, stats.blockSignatureHit.Snapshot().Count()) require.EqualValues(t, 0, stats.blockSignatureMiss.Snapshot().Count()) }, @@ -77,9 +80,11 @@ func TestMessageSignatureHandlerP2P(t *testing.T) { }, verifyStats: func(t *testing.T, stats *handlerStats) { require.EqualValues(t, 1, stats.messageSignatureRequest.Snapshot().Count()) + require.EqualValues(t, 0, stats.messageSignatureValidationFail.Snapshot().Count()) require.EqualValues(t, 1, stats.messageSignatureHit.Snapshot().Count()) require.EqualValues(t, 0, stats.messageSignatureMiss.Snapshot().Count()) require.EqualValues(t, 0, stats.blockSignatureRequest.Snapshot().Count()) + require.EqualValues(t, 0, stats.blockSignatureValidationFail.Snapshot().Count()) require.EqualValues(t, 0, stats.blockSignatureHit.Snapshot().Count()) require.EqualValues(t, 0, stats.blockSignatureMiss.Snapshot().Count()) }, @@ -89,21 +94,23 @@ func TestMessageSignatureHandlerP2P(t *testing.T) { return sdk.SignatureRequest{Message: unknownMessage.Bytes()}, nil }, verifyStats: func(t *testing.T, stats *handlerStats) { - require.EqualValues(t, 1, stats.messageSignatureRequest.Snapshot().Count()) + require.EqualValues(t, 0, stats.messageSignatureRequest.Snapshot().Count()) + require.EqualValues(t, 1, stats.messageSignatureValidationFail.Snapshot().Count()) require.EqualValues(t, 0, stats.messageSignatureHit.Snapshot().Count()) - require.EqualValues(t, 1, stats.messageSignatureMiss.Snapshot().Count()) + require.EqualValues(t, 0, stats.messageSignatureMiss.Snapshot().Count()) require.EqualValues(t, 0, stats.blockSignatureRequest.Snapshot().Count()) + require.EqualValues(t, 0, stats.blockSignatureValidationFail.Snapshot().Count()) require.EqualValues(t, 0, stats.blockSignatureHit.Snapshot().Count()) require.EqualValues(t, 0, stats.blockSignatureMiss.Snapshot().Count()) }, - err: &common.AppError{Code: ErrFailedToGetSig}, + err: &common.AppError{Code: ValidateErrCode}, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { - handler := NewSignatureRequestHandlerP2P(backend, message.Codec) - handler.stats.Clear() + signerVerifier := NewSignerVerifier(backend, message.Codec) + handler := acp118.NewHandler(signerVerifier, signerVerifier) request, expectedResponse := test.setup() requestBytes, err := proto.Marshal(&request) @@ -115,7 +122,7 @@ func TestMessageSignatureHandlerP2P(t *testing.T) { require.Nil(t, appErr) } - 
test.verifyStats(t, handler.stats) + test.verifyStats(t, signerVerifier.(*p2pSignerVerifier).stats) // If the expected response is empty, assert that the handler returns an empty response and return early. if len(expectedResponse) == 0 { @@ -131,7 +138,7 @@ func TestMessageSignatureHandlerP2P(t *testing.T) { } } -func TestBlockSignatureHandlerP2P(t *testing.T) { +func TestBlockSignatures(t *testing.T) { database := memdb.New() snowCtx := utils.TestSnowContext() blsSecretKey, err := bls.NewSecretKey() @@ -176,9 +183,11 @@ func TestBlockSignatureHandlerP2P(t *testing.T) { }, verifyStats: func(t *testing.T, stats *handlerStats) { require.EqualValues(t, 0, stats.messageSignatureRequest.Snapshot().Count()) + require.EqualValues(t, 0, stats.messageSignatureValidationFail.Snapshot().Count()) require.EqualValues(t, 0, stats.messageSignatureHit.Snapshot().Count()) require.EqualValues(t, 0, stats.messageSignatureMiss.Snapshot().Count()) require.EqualValues(t, 1, stats.blockSignatureRequest.Snapshot().Count()) + require.EqualValues(t, 0, stats.blockSignatureValidationFail.Snapshot().Count()) require.EqualValues(t, 1, stats.blockSignatureHit.Snapshot().Count()) require.EqualValues(t, 0, stats.blockSignatureMiss.Snapshot().Count()) }, @@ -189,20 +198,22 @@ func TestBlockSignatureHandlerP2P(t *testing.T) { }, verifyStats: func(t *testing.T, stats *handlerStats) { require.EqualValues(t, 0, stats.messageSignatureRequest.Snapshot().Count()) + require.EqualValues(t, 0, stats.messageSignatureValidationFail.Snapshot().Count()) require.EqualValues(t, 0, stats.messageSignatureHit.Snapshot().Count()) require.EqualValues(t, 0, stats.messageSignatureMiss.Snapshot().Count()) - require.EqualValues(t, 1, stats.blockSignatureRequest.Snapshot().Count()) + require.EqualValues(t, 0, stats.blockSignatureRequest.Snapshot().Count()) + require.EqualValues(t, 1, stats.blockSignatureValidationFail.Snapshot().Count()) require.EqualValues(t, 0, stats.blockSignatureHit.Snapshot().Count()) - require.EqualValues(t, 1, stats.blockSignatureMiss.Snapshot().Count()) + require.EqualValues(t, 0, stats.blockSignatureMiss.Snapshot().Count()) }, - err: &common.AppError{Code: ErrFailedToGetSig}, + err: &common.AppError{Code: ValidateErrCode}, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { - handler := NewSignatureRequestHandlerP2P(backend, message.Codec) - handler.stats.Clear() + signerVerifier := NewSignerVerifier(backend, message.Codec) + handler := acp118.NewHandler(signerVerifier, signerVerifier) request, expectedResponse := test.setup() requestBytes, err := proto.Marshal(&request) @@ -214,7 +225,7 @@ func TestBlockSignatureHandlerP2P(t *testing.T) { require.Nil(t, appErr) } - test.verifyStats(t, handler.stats) + test.verifyStats(t, signerVerifier.(*p2pSignerVerifier).stats) // If the expected response is empty, assert that the handler returns an empty response and return early. 
if len(expectedResponse) == 0 { diff --git a/warp/handlers/stats.go b/warp/handlers/stats.go index 481f2aaac0..545013fcd3 100644 --- a/warp/handlers/stats.go +++ b/warp/handlers/stats.go @@ -12,11 +12,13 @@ import ( type handlerStats struct { // MessageSignatureRequestHandler metrics messageSignatureRequest metrics.Counter + messageSignatureValidationFail metrics.Counter messageSignatureHit metrics.Counter messageSignatureMiss metrics.Counter messageSignatureRequestDuration metrics.Gauge // BlockSignatureRequestHandler metrics blockSignatureRequest metrics.Counter + blockSignatureValidationFail metrics.Counter blockSignatureHit metrics.Counter blockSignatureMiss metrics.Counter blockSignatureRequestDuration metrics.Gauge @@ -24,36 +26,34 @@ type handlerStats struct { func newStats() *handlerStats { return &handlerStats{ - messageSignatureRequest: metrics.GetOrRegisterCounter("message_signature_request_count", nil), - messageSignatureHit: metrics.GetOrRegisterCounter("message_signature_request_hit", nil), - messageSignatureMiss: metrics.GetOrRegisterCounter("message_signature_request_miss", nil), - messageSignatureRequestDuration: metrics.GetOrRegisterGauge("message_signature_request_duration", nil), - blockSignatureRequest: metrics.GetOrRegisterCounter("block_signature_request_count", nil), - blockSignatureHit: metrics.GetOrRegisterCounter("block_signature_request_hit", nil), - blockSignatureMiss: metrics.GetOrRegisterCounter("block_signature_request_miss", nil), - blockSignatureRequestDuration: metrics.GetOrRegisterGauge("block_signature_request_duration", nil), + messageSignatureRequest: metrics.NewRegisteredCounter("message_signature_request_count", nil), + messageSignatureValidationFail: metrics.NewRegisteredCounter("message_signature_request_validation_fail", nil), + messageSignatureHit: metrics.NewRegisteredCounter("message_signature_request_hit", nil), + messageSignatureMiss: metrics.NewRegisteredCounter("message_signature_request_miss", nil), + messageSignatureRequestDuration: metrics.NewRegisteredGauge("message_signature_request_duration", nil), + blockSignatureRequest: metrics.NewRegisteredCounter("block_signature_request_count", nil), + blockSignatureValidationFail: metrics.NewRegisteredCounter("block_signature_request_validation_fail", nil), + blockSignatureHit: metrics.NewRegisteredCounter("block_signature_request_hit", nil), + blockSignatureMiss: metrics.NewRegisteredCounter("block_signature_request_miss", nil), + blockSignatureRequestDuration: metrics.NewRegisteredGauge("block_signature_request_duration", nil), } } func (h *handlerStats) IncMessageSignatureRequest() { h.messageSignatureRequest.Inc(1) } -func (h *handlerStats) IncMessageSignatureHit() { h.messageSignatureHit.Inc(1) } -func (h *handlerStats) IncMessageSignatureMiss() { h.messageSignatureMiss.Inc(1) } +func (h *handlerStats) IncMessageSignatureValidationFail() { + h.messageSignatureValidationFail.Inc(1) +} +func (h *handlerStats) IncMessageSignatureHit() { h.messageSignatureHit.Inc(1) } +func (h *handlerStats) IncMessageSignatureMiss() { h.messageSignatureMiss.Inc(1) } func (h *handlerStats) UpdateMessageSignatureRequestTime(duration time.Duration) { h.messageSignatureRequestDuration.Inc(int64(duration)) } func (h *handlerStats) IncBlockSignatureRequest() { h.blockSignatureRequest.Inc(1) } -func (h *handlerStats) IncBlockSignatureHit() { h.blockSignatureHit.Inc(1) } -func (h *handlerStats) IncBlockSignatureMiss() { h.blockSignatureMiss.Inc(1) } +func (h *handlerStats) IncBlockSignatureValidationFail() { + 
h.blockSignatureValidationFail.Inc(1) +} +func (h *handlerStats) IncBlockSignatureHit() { h.blockSignatureHit.Inc(1) } +func (h *handlerStats) IncBlockSignatureMiss() { h.blockSignatureMiss.Inc(1) } func (h *handlerStats) UpdateBlockSignatureRequestTime(duration time.Duration) { h.blockSignatureRequestDuration.Inc(int64(duration)) } -func (h *handlerStats) Clear() { - h.messageSignatureRequest.Clear() - h.messageSignatureHit.Clear() - h.messageSignatureMiss.Clear() - h.messageSignatureRequestDuration.Update(0) - h.blockSignatureRequest.Clear() - h.blockSignatureHit.Clear() - h.blockSignatureMiss.Clear() - h.blockSignatureRequestDuration.Update(0) -} From 701e2ec5e28da9e78d2a103a6fa2b343568ba4a1 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 9 Oct 2024 17:33:23 +0300 Subject: [PATCH 42/98] avoid revalidating in sign --- warp/handlers/signer_verifier_p2p.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/warp/handlers/signer_verifier_p2p.go b/warp/handlers/signer_verifier_p2p.go index db24fe9561..6237217ee2 100644 --- a/warp/handlers/signer_verifier_p2p.go +++ b/warp/handlers/signer_verifier_p2p.go @@ -137,7 +137,7 @@ func (s *p2pSignerVerifier) GetMessageSignature(message *avalancheWarp.UnsignedM }() // TODO: consider changing backend to return []byte - sig, err := s.backend.GetMessageSignature(message) + sig, err := s.backend.SignMessage(message) return sig[:], err } @@ -151,6 +151,6 @@ func (s *p2pSignerVerifier) GetBlockSignature(blockID ids.ID) ([]byte, error) { }() // TODO: consider changing backend to return []byte - sig, err := s.backend.GetBlockSignature(blockID) + sig, err := s.backend.SignBlock(blockID) return sig[:], err } From 1c98afaedb3cfbd4e4ea3dfe7d21a10cdc241fcc Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Sun, 13 Oct 2024 12:52:39 +0300 Subject: [PATCH 43/98] refactor warp backend to use acp118 handler --- go.mod | 2 +- go.sum | 4 + plugin/evm/vm.go | 42 ++-- plugin/evm/vm_warp_test.go | 8 +- warp/backend.go | 180 ++++------------ warp/backend_test.go | 59 +----- warp/handlers/signature_request.go | 9 +- warp/handlers/signature_request_test.go | 7 +- warp/handlers/signer_verifier_p2p.go | 156 -------------- warp/handlers/signer_verifier_p2p_test.go | 242 ---------------------- warp/handlers/stats.go | 18 +- warp/messages/payload.go | 6 - warp/stats.go | 36 ++++ warp/verifier_backend.go | 105 ++++++++++ warp/verifier_backend_test.go | 222 ++++++++++++++++++++ warp/wrapped_cache.go | 52 +++++ 16 files changed, 515 insertions(+), 633 deletions(-) delete mode 100644 warp/handlers/signer_verifier_p2p.go delete mode 100644 warp/handlers/signer_verifier_p2p_test.go create mode 100644 warp/stats.go create mode 100644 warp/verifier_backend.go create mode 100644 warp/verifier_backend_test.go create mode 100644 warp/wrapped_cache.go diff --git a/go.mod b/go.mod index 08265830d2..e11faaf417 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22.8 require ( github.com/VictoriaMetrics/fastcache v1.12.1 github.com/antithesishq/antithesis-sdk-go v0.3.8 - github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20241004185122-5285749cc752 + github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20241009195818-ab58a805404b github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 1693074e83..a2d380588b 100644 --- a/go.sum +++ b/go.sum @@ -60,6 +60,10 
@@ github.com/antithesishq/antithesis-sdk-go v0.3.8/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20241004185122-5285749cc752 h1:pQh5HA1sf31baNmN6RZpI02JOLmv7o7TcBY4VpuOSMI= github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20241004185122-5285749cc752/go.mod h1:qSHmog3wMVjo/ruIAQo0ppXAilyni07NIu5K88RyhWE= +github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20241009154434-5d66c421bf61 h1:Emh/obJT0CU4rfS4dkLOFqcHuM66yHFkRynUzFUCX0Q= +github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20241009154434-5d66c421bf61/go.mod h1:qSHmog3wMVjo/ruIAQo0ppXAilyni07NIu5K88RyhWE= +github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20241009195818-ab58a805404b h1:gYej1NcDzOMGi03rgO0g8UiJVV/eCG33iVqs3HhDw4Y= +github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20241009195818-ab58a805404b/go.mod h1:qSHmog3wMVjo/ruIAQo0ppXAilyni07NIu5K88RyhWE= github.com/ava-labs/coreth v0.13.8 h1:f14X3KgwHl9LwzfxlN6S4bbn5VA2rhEsNnHaRLSTo/8= github.com/ava-labs/coreth v0.13.8/go.mod h1:t3BSv/eQv0AlDPMfEDCMMoD/jq1RkUsbFzQAFg5qBcE= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index b874b6e53d..02a9420211 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -16,7 +16,10 @@ import ( "sync" "time" + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/cache/metercacher" "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/acp118" "github.com/ava-labs/avalanchego/network/p2p/gossip" "github.com/prometheus/client_golang/prometheus" @@ -44,7 +47,6 @@ import ( statesyncclient "github.com/ava-labs/subnet-evm/sync/client" "github.com/ava-labs/subnet-evm/sync/client/stats" "github.com/ava-labs/subnet-evm/warp" - "github.com/ava-labs/subnet-evm/warp/handlers" // Force-load tracer engine to trigger registration // @@ -70,7 +72,6 @@ import ( "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/network/p2p/acp118" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" @@ -487,13 +488,19 @@ func (vm *VM) Initialize( for i, hexMsg := range vm.config.WarpOffChainMessages { offchainWarpMessages[i] = []byte(hexMsg) } + warpSignatureCache := &cache.LRU[ids.ID, []byte]{Size: warpSignatureCacheSize} + meteredCache, err := metercacher.New("warp_signature_cache", vm.sdkMetrics, warpSignatureCache) + if err != nil { + return fmt.Errorf("failed to create warp signature cache: %w", err) + } + vm.warpBackend, err = warp.NewBackend( vm.ctx.NetworkID, vm.ctx.ChainID, vm.ctx.WarpSigner, vm, vm.warpDB, - warpSignatureCacheSize, + meteredCache, offchainWarpMessages, ) if err != nil { @@ -502,7 +509,7 @@ func (vm *VM) Initialize( // clear 
warpdb on initialization if config enabled if vm.config.PruneWarpDB { - if err := vm.warpBackend.Clear(); err != nil { + if err := database.Clear(vm.warpDB, ethdb.IdealBatchSize); err != nil { return fmt.Errorf("failed to prune warpDB: %w", err) } } @@ -513,7 +520,16 @@ func (vm *VM) Initialize( go vm.ctx.Log.RecoverAndPanic(vm.startContinuousProfiler) - vm.initializeHandlers() + // Add p2p warp message warpHandler + warpHandler := acp118.NewCachedHandler(meteredCache, vm.warpBackend, vm.ctx.WarpSigner) + vm.Network.AddHandler(p2p.SignatureRequestHandlerID, warpHandler) + + vm.setAppRequestHandlers() + + vm.StateSyncServer = NewStateSyncServer(&stateSyncServerConfig{ + Chain: vm.blockChain, + SyncableInterval: vm.config.StateSyncCommitInterval, + }) return vm.initializeStateSyncClient(lastAcceptedHeight) } @@ -617,22 +633,6 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { return nil } -// initializeHandlers should be called after [vm.chain] is initialized. -func (vm *VM) initializeHandlers() { - vm.StateSyncServer = NewStateSyncServer(&stateSyncServerConfig{ - Chain: vm.blockChain, - SyncableInterval: vm.config.StateSyncCommitInterval, - }) - - // Add p2p warp message warpHandler - signerVerifier := handlers.NewSignerVerifier(vm.warpBackend, vm.networkCodec) - // TODO: consider chancing acp118 handler to take a single Signer interface - warpHandler := acp118.NewHandler(signerVerifier, signerVerifier) - vm.Network.AddHandler(p2p.SignatureRequestHandlerID, warpHandler) - - vm.setAppRequestHandlers() -} - func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error { block := vm.newBlock(lastAcceptedBlock) diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index abaf68f4fc..0297f6df62 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -751,6 +751,8 @@ func TestMessageSignatureRequestsToVM(t *testing.T) { require.NoError(t, err) signature, err := vm.warpBackend.GetMessageSignature(warpMessage) require.NoError(t, err) + var knownSignature [bls.SignatureLen]byte + copy(knownSignature[:], signature) tests := map[string]struct { messageID ids.ID @@ -758,7 +760,7 @@ func TestMessageSignatureRequestsToVM(t *testing.T) { }{ "known": { messageID: warpMessage.ID(), - expectedResponse: signature, + expectedResponse: knownSignature, }, "unknown": { messageID: ids.GenerateTestID(), @@ -807,6 +809,8 @@ func TestBlockSignatureRequestsToVM(t *testing.T) { signature, err := vm.warpBackend.GetBlockSignature(lastAcceptedID) require.NoError(t, err) + var knownSignature [bls.SignatureLen]byte + copy(knownSignature[:], signature) tests := map[string]struct { blockID ids.ID @@ -814,7 +818,7 @@ func TestBlockSignatureRequestsToVM(t *testing.T) { }{ "known": { blockID: lastAcceptedID, - expectedResponse: signature, + expectedResponse: knownSignature, }, "unknown": { blockID: ids.GenerateTestID(), diff --git a/warp/backend.go b/warp/backend.go index b67be9ac70..8d60fee41e 100644 --- a/warp/backend.go +++ b/warp/backend.go @@ -11,21 +11,19 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p/acp118" "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/utils/crypto/bls" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" 
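// An illustration of the shape this refactor gives the warp backend: a request is
// verified first, then signed, and the signature is cached by message ID so the
// backend and the acp118 handler can share a single cache. The standalone sketch
// below uses simplified, hypothetical types (Message, Signer, Verifier,
// cachedSigner) rather than the real avalanchego/acp118 APIs, so it mirrors only
// the flow, not the actual interfaces; it is not part of this patch.
package main

import (
	"fmt"
	"sync"
)

// Message stands in for an unsigned warp message; ID would be its hash in practice.
type Message struct {
	ID      string
	Payload []byte
}

// Signer produces a signature over a message (a BLS signer in the real VM).
type Signer interface {
	Sign(msg Message) ([]byte, error)
}

// Verifier decides whether a message should be signed at all.
type Verifier interface {
	Verify(msg Message) error
}

// cachedSigner verifies a message once, signs it, and caches the signature by ID,
// so repeated requests for the same message skip both verification and signing.
type cachedSigner struct {
	mu       sync.Mutex
	sigs     map[string][]byte
	signer   Signer
	verifier Verifier
}

func newCachedSigner(s Signer, v Verifier) *cachedSigner {
	return &cachedSigner{sigs: make(map[string][]byte), signer: s, verifier: v}
}

// SignatureFor returns a cached signature when present; otherwise it verifies,
// signs, stores, and returns the fresh signature.
func (c *cachedSigner) SignatureFor(msg Message) ([]byte, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if sig, ok := c.sigs[msg.ID]; ok {
		return sig, nil
	}
	if err := c.verifier.Verify(msg); err != nil {
		return nil, fmt.Errorf("failed to validate message: %w", err)
	}
	sig, err := c.signer.Sign(msg)
	if err != nil {
		return nil, fmt.Errorf("failed to sign message: %w", err)
	}
	c.sigs[msg.ID] = sig
	return sig, nil
}

// Trivial stand-ins so the sketch compiles and runs on its own.
type allowAll struct{}

func (allowAll) Verify(Message) error { return nil }

type prefixSigner struct{}

func (prefixSigner) Sign(m Message) ([]byte, error) {
	return append([]byte("sig:"), m.Payload...), nil
}

func main() {
	cs := newCachedSigner(prefixSigner{}, allowAll{})
	sig, err := cs.SignatureFor(Message{ID: "m1", Payload: []byte("hello")})
	fmt.Println(string(sig), err) // a second call for "m1" would hit the cache
}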
"github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" - "github.com/ava-labs/subnet-evm/warp/messages" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" ) var ( _ Backend = &backend{} errParsingOffChainMessage = errors.New("failed to parse off-chain message") -) -const batchSize = ethdb.IdealBatchSize + messageCacheSize = 500 +) type BlockClient interface { GetAcceptedBlock(ctx context.Context, blockID ids.ID) (snowman.Block, error) @@ -38,30 +36,17 @@ type Backend interface { AddMessage(unsignedMessage *avalancheWarp.UnsignedMessage) error // GetMessageSignature validates the message and returns the signature of the requested message. - GetMessageSignature(message *avalancheWarp.UnsignedMessage) ([bls.SignatureLen]byte, error) + GetMessageSignature(message *avalancheWarp.UnsignedMessage) ([]byte, error) // GetBlockSignature validates blockID and returns the signature of the requested message hash. - GetBlockSignature(blockID ids.ID) ([bls.SignatureLen]byte, error) + GetBlockSignature(blockID ids.ID) ([]byte, error) // GetMessage retrieves the [unsignedMessage] from the warp backend database if available - // TODO: After E-Upgrade, the backend no longer needs to store the mapping from messageHash + // TODO: After Etna, the backend no longer needs to store the mapping from messageHash // to unsignedMessage (and this method can be removed). GetMessage(messageHash ids.ID) (*avalancheWarp.UnsignedMessage, error) - // ValidateMessage validates the [unsignedMessage] and returns an error if the message is invalid. - ValidateMessage(unsignedMessage *avalancheWarp.UnsignedMessage) error - - // ValidateBlockMessage validates the block message with the given [blockID] and returns an error if the message is invalid. - ValidateBlockMessage(blockID ids.ID) error - - // SignMessage signs the [unsignedMessage] and returns the signature. - SignMessage(unsignedMessage *avalancheWarp.UnsignedMessage) ([bls.SignatureLen]byte, error) - - // SignBlock signs the block message with the given [blockID] and returns the signature. - SignBlock(blockID ids.ID) ([bls.SignatureLen]byte, error) - - // Clear clears the entire db - Clear() error + acp118.Verifier } // backend implements Backend, keeps track of warp messages, and generates message signatures. @@ -71,10 +56,10 @@ type backend struct { db database.Database warpSigner avalancheWarp.Signer blockClient BlockClient - messageSignatureCache *cache.LRU[ids.ID, [bls.SignatureLen]byte] - blockSignatureCache *cache.LRU[ids.ID, [bls.SignatureLen]byte] + signatureCache cache.Cacher[ids.ID, []byte] messageCache *cache.LRU[ids.ID, *avalancheWarp.UnsignedMessage] offchainAddressedCallMsgs map[ids.ID]*avalancheWarp.UnsignedMessage + stats *verifierStats } // NewBackend creates a new Backend, and initializes the signature cache and message tracking database. 
@@ -84,18 +69,20 @@ func NewBackend( warpSigner avalancheWarp.Signer, blockClient BlockClient, db database.Database, - cacheSize int, + sdkCache cache.Cacher[ids.ID, []byte], offchainMessages [][]byte, ) (Backend, error) { b := &backend{ - networkID: networkID, - sourceChainID: sourceChainID, - db: db, - warpSigner: warpSigner, - blockClient: blockClient, - messageSignatureCache: &cache.LRU[ids.ID, [bls.SignatureLen]byte]{Size: cacheSize}, - blockSignatureCache: &cache.LRU[ids.ID, [bls.SignatureLen]byte]{Size: cacheSize}, - messageCache: &cache.LRU[ids.ID, *avalancheWarp.UnsignedMessage]{Size: cacheSize}, + networkID: networkID, + sourceChainID: sourceChainID, + db: db, + warpSigner: warpSigner, + blockClient: blockClient, + // sdkCache returns sdk.SignatureResponse proto bytes, + // and it must be wrapped to return Signature bytes. + signatureCache: NewWrappedCache(sdkCache), + messageCache: &cache.LRU[ids.ID, *avalancheWarp.UnsignedMessage]{Size: messageCacheSize}, + stats: newVerifierStats(), offchainAddressedCallMsgs: make(map[ids.ID]*avalancheWarp.UnsignedMessage), } return b, b.initOffChainMessages(offchainMessages) @@ -126,13 +113,6 @@ func (b *backend) initOffChainMessages(offchainMessages [][]byte) error { return nil } -func (b *backend) Clear() error { - b.messageSignatureCache.Flush() - b.blockSignatureCache.Flush() - b.messageCache.Flush() - return database.Clear(b.db, batchSize) -} - func (b *backend) AddMessage(unsignedMessage *avalancheWarp.UnsignedMessage) error { messageID := unsignedMessage.ID() @@ -143,110 +123,56 @@ func (b *backend) AddMessage(unsignedMessage *avalancheWarp.UnsignedMessage) err return fmt.Errorf("failed to put warp signature in db: %w", err) } - var signature [bls.SignatureLen]byte sig, err := b.warpSigner.Sign(unsignedMessage) if err != nil { return fmt.Errorf("failed to sign warp message: %w", err) } - copy(signature[:], sig) - b.messageSignatureCache.Put(messageID, signature) + b.signatureCache.Put(messageID, sig) log.Debug("Adding warp message to backend", "messageID", messageID) return nil } -func (b *backend) GetMessageSignature(unsignedMessage *avalancheWarp.UnsignedMessage) ([bls.SignatureLen]byte, error) { +func (b *backend) GetMessageSignature(unsignedMessage *avalancheWarp.UnsignedMessage) ([]byte, error) { messageID := unsignedMessage.ID() log.Debug("Getting warp message from backend", "messageID", messageID) - if sig, ok := b.messageSignatureCache.Get(messageID); ok { + if sig, ok := b.signatureCache.Get(messageID); ok { return sig, nil } - if err := b.ValidateMessage(unsignedMessage); err != nil { - return [bls.SignatureLen]byte{}, fmt.Errorf("failed to validate warp message: %w", err) - } - return b.signMessage(unsignedMessage) -} - -func (b *backend) SignMessage(unsignedMessage *avalancheWarp.UnsignedMessage) ([bls.SignatureLen]byte, error) { - messageID := unsignedMessage.ID() - - if sig, ok := b.messageSignatureCache.Get(messageID); ok { - return sig, nil + if err := b.verifyMessage(unsignedMessage); err != nil { + return []byte{}, fmt.Errorf("failed to validate warp message: %w", err) } - return b.signMessage(unsignedMessage) } -func (b *backend) ValidateMessage(unsignedMessage *avalancheWarp.UnsignedMessage) error { - messageID := unsignedMessage.ID() - - if _, ok := b.messageSignatureCache.Get(messageID); ok { - return nil - } - // Known on-chain messages should be signed - if _, err := b.GetMessage(messageID); err == nil { - return nil - } - - // Try to parse the payload as an AddressedCall - addressedCall, err := 
payload.ParseAddressedCall(unsignedMessage.Payload) - if err != nil { - return fmt.Errorf("failed to parse unknown message as AddressedCall: %w", err) - } +func (b *backend) GetBlockSignature(blockID ids.ID) ([]byte, error) { + log.Debug("Getting block from backend", "blockID", blockID) - // Further, parse the payload to see if it is a known type. - parsed, err := messages.Parse(addressedCall.Payload) + blockHashPayload, err := payload.NewHash(blockID) if err != nil { - return fmt.Errorf("failed to parse unknown message: %w", err) + return []byte{}, fmt.Errorf("failed to create new block hash payload: %w", err) } - // Check if the message is a known type that can be signed on demand - signable, ok := parsed.(messages.Signable) - if !ok { - return fmt.Errorf("parsed message is not Signable: %T", signable) - } - - // Check if the message should be signed according to its type - if err := signable.VerifyMesssage(addressedCall.SourceAddress); err != nil { - return fmt.Errorf("failed to verify Signable message: %w", err) - } - return nil -} - -func (b *backend) ValidateBlockMessage(blockID ids.ID) error { - if _, ok := b.blockSignatureCache.Get(blockID); ok { - return nil - } - - _, err := b.blockClient.GetAcceptedBlock(context.TODO(), blockID) + unsignedMessage, err := avalancheWarp.NewUnsignedMessage(b.networkID, b.sourceChainID, blockHashPayload.Bytes()) if err != nil { - return fmt.Errorf("failed to get block %s: %w", blockID, err) + return []byte{}, fmt.Errorf("failed to create new unsigned warp message: %w", err) } - return nil -} - -func (b *backend) GetBlockSignature(blockID ids.ID) ([bls.SignatureLen]byte, error) { - log.Debug("Getting block from backend", "blockID", blockID) - if sig, ok := b.blockSignatureCache.Get(blockID); ok { + if sig, ok := b.signatureCache.Get(unsignedMessage.ID()); ok { return sig, nil } - if err := b.ValidateBlockMessage(blockID); err != nil { - return [bls.SignatureLen]byte{}, fmt.Errorf("failed to validate block message: %w", err) + if err := b.verifyBlockMessage(blockHashPayload); err != nil { + return []byte{}, fmt.Errorf("failed to validate block message: %w", err) } - return b.signBlock(blockID) -} - -func (b *backend) SignBlock(blockID ids.ID) ([bls.SignatureLen]byte, error) { - if sig, ok := b.blockSignatureCache.Get(blockID); ok { - return sig, nil + sig, err := b.signMessage(unsignedMessage) + if err != nil { + return []byte{}, fmt.Errorf("failed to sign block message: %w", err) } - - return b.signBlock(blockID) + return sig, nil } func (b *backend) GetMessage(messageID ids.ID) (*avalancheWarp.UnsignedMessage, error) { @@ -271,34 +197,12 @@ func (b *backend) GetMessage(messageID ids.ID) (*avalancheWarp.UnsignedMessage, return unsignedMessage, nil } -func (b *backend) signMessage(unsignedMessage *avalancheWarp.UnsignedMessage) ([bls.SignatureLen]byte, error) { - sig, err := b.warpSigner.Sign(unsignedMessage) - if err != nil { - return [bls.SignatureLen]byte{}, fmt.Errorf("failed to sign warp message: %w", err) - } - - var signature [bls.SignatureLen]byte - copy(signature[:], sig) - b.messageSignatureCache.Put(unsignedMessage.ID(), signature) - return signature, nil -} - -func (b *backend) signBlock(blockID ids.ID) ([bls.SignatureLen]byte, error) { - blockHashPayload, err := payload.NewHash(blockID) - if err != nil { - return [bls.SignatureLen]byte{}, fmt.Errorf("failed to create new block hash payload: %w", err) - } - unsignedMessage, err := avalancheWarp.NewUnsignedMessage(b.networkID, b.sourceChainID, blockHashPayload.Bytes()) - if err != nil { - 
return [bls.SignatureLen]byte{}, fmt.Errorf("failed to create new unsigned warp message: %w", err) - } +func (b *backend) signMessage(unsignedMessage *avalancheWarp.UnsignedMessage) ([]byte, error) { sig, err := b.warpSigner.Sign(unsignedMessage) if err != nil { - return [bls.SignatureLen]byte{}, fmt.Errorf("failed to sign warp message: %w", err) + return []byte{}, fmt.Errorf("failed to sign warp message: %w", err) } - var signature [bls.SignatureLen]byte - copy(signature[:], sig) - b.blockSignatureCache.Put(blockID, signature) - return signature, nil + b.signatureCache.Put(unsignedMessage.ID(), sig) + return sig, nil } diff --git a/warp/backend_test.go b/warp/backend_test.go index 21013dfc24..91129bd104 100644 --- a/warp/backend_test.go +++ b/warp/backend_test.go @@ -6,6 +6,7 @@ package warp import ( "testing" + "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" @@ -35,56 +36,14 @@ func init() { } } -func TestClearDB(t *testing.T) { - db := memdb.New() - - sk, err := bls.NewSecretKey() - require.NoError(t, err) - warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) - backendIntf, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, 500, nil) - require.NoError(t, err) - backend, ok := backendIntf.(*backend) - require.True(t, ok) - - // use multiple messages to test that all messages get cleared - payloads := [][]byte{[]byte("test1"), []byte("test2"), []byte("test3"), []byte("test4"), []byte("test5")} - messages := make([]*avalancheWarp.UnsignedMessage, 0, len(payloads)) - - // add all messages - for _, payload := range payloads { - unsignedMsg, err := avalancheWarp.NewUnsignedMessage(networkID, sourceChainID, payload) - require.NoError(t, err) - messages = append(messages, unsignedMsg) - err = backend.AddMessage(unsignedMsg) - require.NoError(t, err) - // ensure that the message was added - _, err = backend.GetMessageSignature(unsignedMsg) - require.NoError(t, err) - } - - err = backend.Clear() - require.NoError(t, err) - require.Zero(t, backend.messageCache.Len()) - require.Zero(t, backend.messageSignatureCache.Len()) - require.Zero(t, backend.blockSignatureCache.Len()) - it := db.NewIterator() - defer it.Release() - require.False(t, it.Next()) - - // ensure all messages have been deleted - for _, message := range messages { - _, err := backend.GetMessageSignature(message) - require.ErrorContains(t, err, "failed to validate warp message") - } -} - func TestAddAndGetValidMessage(t *testing.T) { db := memdb.New() sk, err := bls.NewSecretKey() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, 500, nil) + messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, messageSignatureCache, nil) require.NoError(t, err) // Add testUnsignedMessage to the warp backend @@ -106,7 +65,8 @@ func TestAddAndGetUnknownMessage(t *testing.T) { sk, err := bls.NewSecretKey() require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, 500, nil) + messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, messageSignatureCache, 
nil) require.NoError(t, err) // Try getting a signature for a message that was not added. @@ -124,7 +84,8 @@ func TestGetBlockSignature(t *testing.T) { sk, err := bls.NewSecretKey() require.NoError(err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) - backend, err := NewBackend(networkID, sourceChainID, warpSigner, blockClient, db, 500, nil) + messageSignatureCache := &cache.LRU[ids.ID, []byte]{} + backend, err := NewBackend(networkID, sourceChainID, warpSigner, blockClient, db, messageSignatureCache, nil) require.NoError(err) blockHashPayload, err := payload.NewHash(blkID) @@ -150,7 +111,8 @@ func TestZeroSizedCache(t *testing.T) { warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) // Verify zero sized cache works normally, because the lru cache will be initialized to size 1 for any size parameter <= 0. - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, 0, nil) + messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 0} + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, messageSignatureCache, nil) require.NoError(t, err) // Add testUnsignedMessage to the warp backend @@ -203,7 +165,8 @@ func TestOffChainMessages(t *testing.T) { require := require.New(t) db := memdb.New() - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, 0, test.offchainMessages) + messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, messageSignatureCache, test.offchainMessages) require.ErrorIs(err, test.err) if test.check != nil { test.check(require, backend) diff --git a/warp/handlers/signature_request.go b/warp/handlers/signature_request.go index 3a28cd994e..1799774b5c 100644 --- a/warp/handlers/signature_request.go +++ b/warp/handlers/signature_request.go @@ -16,7 +16,7 @@ import ( ) // SignatureRequestHandler serves warp signature requests. It is a peer.RequestHandler for message.MessageSignatureRequest. -// TODO: After E-Upgrade, this handler can be removed and SignatureRequestHandlerP2P is sufficient. +// TODO: After Etna, this handler can be removed and SignatureRequestHandlerP2P is sufficient. 
type SignatureRequestHandler struct { backend warp.Backend codec codec.Manager @@ -51,13 +51,14 @@ func (s *SignatureRequestHandler) OnMessageSignatureRequest(ctx context.Context, log.Debug("Unknown warp message requested", "messageID", signatureRequest.MessageID) s.stats.IncMessageSignatureMiss() } else { - signature, err = s.backend.GetMessageSignature(unsignedMessage) + sig, err := s.backend.GetMessageSignature(unsignedMessage) if err != nil { log.Debug("Unknown warp signature requested", "messageID", signatureRequest.MessageID) s.stats.IncMessageSignatureMiss() signature = [bls.SignatureLen]byte{} } else { s.stats.IncMessageSignatureHit() + copy(signature[:], sig) } } @@ -80,13 +81,15 @@ func (s *SignatureRequestHandler) OnBlockSignatureRequest(ctx context.Context, n s.stats.UpdateBlockSignatureRequestTime(time.Since(startTime)) }() - signature, err := s.backend.GetBlockSignature(request.BlockID) + var signature [bls.SignatureLen]byte + sig, err := s.backend.GetBlockSignature(request.BlockID) if err != nil { log.Debug("Unknown warp signature requested", "blockID", request.BlockID) s.stats.IncBlockSignatureMiss() signature = [bls.SignatureLen]byte{} } else { s.stats.IncBlockSignatureHit() + copy(signature[:], sig) } response := message.SignatureResponse{Signature: signature} diff --git a/warp/handlers/signature_request_test.go b/warp/handlers/signature_request_test.go index 455ac268e2..8320b9cea1 100644 --- a/warp/handlers/signature_request_test.go +++ b/warp/handlers/signature_request_test.go @@ -7,6 +7,7 @@ import ( "context" "testing" + "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/bls" @@ -31,7 +32,8 @@ func TestMessageSignatureHandler(t *testing.T) { offchainMessage, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, addressedPayload.Bytes()) require.NoError(t, err) - backend, err := warp.NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, database, 100, [][]byte{offchainMessage.Bytes()}) + messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 100} + backend, err := warp.NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, database, messageSignatureCache, [][]byte{offchainMessage.Bytes()}) require.NoError(t, err) msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, []byte("test")) @@ -131,13 +133,14 @@ func TestBlockSignatureHandler(t *testing.T) { warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) blkID := ids.GenerateTestID() blockClient := warptest.MakeBlockClient(blkID) + messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 100} backend, err := warp.NewBackend( snowCtx.NetworkID, snowCtx.ChainID, warpSigner, blockClient, database, - 100, + messageSignatureCache, nil, ) require.NoError(t, err) diff --git a/warp/handlers/signer_verifier_p2p.go b/warp/handlers/signer_verifier_p2p.go deleted file mode 100644 index 6237217ee2..0000000000 --- a/warp/handlers/signer_verifier_p2p.go +++ /dev/null @@ -1,156 +0,0 @@ -// (c) 2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package handlers - -import ( - "context" - "fmt" - "time" - - "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/network/p2p/acp118" - "github.com/ava-labs/avalanchego/snow/engine/common" - avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" - "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" - "github.com/ava-labs/subnet-evm/warp" -) - -var ( - _ avalancheWarp.Signer = (*p2pSignerVerifier)(nil) - _ acp118.Verifier = (*p2pSignerVerifier)(nil) -) - -const ( - ParseErrCode = iota - GetSigErrCode - MarshalErrCode - ValidateErrCode -) - -var ( - errUnknownPayloadType = fmt.Errorf("unknown payload type") - errFailedToParse = fmt.Errorf("failed to parse payload") - errFailedToGetSig = fmt.Errorf("failed to get signature") -) - -type SignerVerifier interface { - acp118.Verifier - avalancheWarp.Signer -} - -// p2pSignerVerifier serves warp signature requests using the p2p -// framework from avalanchego. It is a peer.RequestHandler for -// message.MessageSignatureRequest. -type p2pSignerVerifier struct { - backend warp.Backend - codec codec.Manager - stats *handlerStats -} - -func NewSignerVerifier(backend warp.Backend, codec codec.Manager) SignerVerifier { - return &p2pSignerVerifier{ - backend: backend, - codec: codec, - stats: newStats(), - } -} - -func (s *p2pSignerVerifier) Verify(_ context.Context, unsignedMessage *avalancheWarp.UnsignedMessage, _ []byte) *common.AppError { - parsed, err := payload.Parse(unsignedMessage.Payload) - if err != nil { - return &common.AppError{ - Code: ParseErrCode, - Message: "failed to parse payload: " + err.Error(), - } - } - - switch p := parsed.(type) { - case *payload.AddressedCall: - err = s.backend.ValidateMessage(unsignedMessage) - if err != nil { - s.stats.IncMessageSignatureValidationFail() - return &common.AppError{ - Code: ValidateErrCode, - Message: "failed to validate message: " + err.Error(), - } - } - case *payload.Hash: - err = s.backend.ValidateBlockMessage(p.Hash) - if err != nil { - s.stats.IncBlockSignatureValidationFail() - return &common.AppError{ - Code: ValidateErrCode, - Message: "failed to validate block message: " + err.Error(), - } - } - default: - return &common.AppError{ - Code: ParseErrCode, - Message: fmt.Sprintf("unknown payload type: %T", p), - } - } - return nil -} - -func (s *p2pSignerVerifier) Sign(unsignedMessage *avalancheWarp.UnsignedMessage) ([]byte, error) { - parsed, err := payload.Parse(unsignedMessage.Payload) - if err != nil { - return nil, fmt.Errorf("%w: %s", errFailedToParse, err.Error()) - } - - var sig []byte - switch p := parsed.(type) { - case *payload.AddressedCall: - sig, err = s.GetMessageSignature(unsignedMessage) - if err != nil { - s.stats.IncMessageSignatureMiss() - } else { - s.stats.IncMessageSignatureHit() - } - case *payload.Hash: - sig, err = s.GetBlockSignature(p.Hash) - if err != nil { - s.stats.IncBlockSignatureMiss() - } else { - s.stats.IncBlockSignatureHit() - } - default: - return nil, fmt.Errorf("%w: %T", errUnknownPayloadType, p) - } - - if err != nil { - return nil, fmt.Errorf("%w: %s", errFailedToGetSig, err.Error()) - } - - return sig, nil -} - -func (s *p2pSignerVerifier) GetMessageSignature(message *avalancheWarp.UnsignedMessage) ([]byte, error) { - startTime := time.Now() - s.stats.IncMessageSignatureRequest() - - // Always report signature request time - defer func() { - 
s.stats.UpdateMessageSignatureRequestTime(time.Since(startTime)) - }() - - // TODO: consider changing backend to return []byte - sig, err := s.backend.SignMessage(message) - return sig[:], err -} - -func (s *p2pSignerVerifier) GetBlockSignature(blockID ids.ID) ([]byte, error) { - startTime := time.Now() - s.stats.IncBlockSignatureRequest() - - // Always report signature request time - defer func() { - s.stats.UpdateBlockSignatureRequestTime(time.Since(startTime)) - }() - - // TODO: consider changing backend to return []byte - sig, err := s.backend.SignBlock(blockID) - return sig[:], err -} diff --git a/warp/handlers/signer_verifier_p2p_test.go b/warp/handlers/signer_verifier_p2p_test.go deleted file mode 100644 index 76eba68317..0000000000 --- a/warp/handlers/signer_verifier_p2p_test.go +++ /dev/null @@ -1,242 +0,0 @@ -// (c) 2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package handlers - -import ( - "context" - "testing" - "time" - - "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/network/p2p/acp118" - "github.com/ava-labs/avalanchego/proto/pb/sdk" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/crypto/bls" - avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" - "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" - "github.com/ava-labs/subnet-evm/plugin/evm/message" - "github.com/ava-labs/subnet-evm/utils" - "github.com/ava-labs/subnet-evm/warp" - "github.com/ava-labs/subnet-evm/warp/warptest" - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" -) - -func TestMessageSignatures(t *testing.T) { - database := memdb.New() - snowCtx := utils.TestSnowContext() - blsSecretKey, err := bls.NewSecretKey() - require.NoError(t, err) - warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) - - addressedPayload, err := payload.NewAddressedCall([]byte{1, 2, 3}, []byte{1, 2, 3}) - require.NoError(t, err) - offchainMessage, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, addressedPayload.Bytes()) - require.NoError(t, err) - - backend, err := warp.NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, database, 100, [][]byte{offchainMessage.Bytes()}) - require.NoError(t, err) - - offchainPayload, err := payload.NewAddressedCall([]byte{0, 0, 0}, []byte("test")) - require.NoError(t, err) - msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, offchainPayload.Bytes()) - require.NoError(t, err) - require.NoError(t, backend.AddMessage(msg)) - signature, err := backend.GetMessageSignature(msg) - require.NoError(t, err) - offchainSignature, err := backend.GetMessageSignature(offchainMessage) - require.NoError(t, err) - - unknownPayload, err := payload.NewAddressedCall([]byte{0, 0, 0}, []byte("unknown message")) - require.NoError(t, err) - unknownMessage, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, unknownPayload.Bytes()) - require.NoError(t, err) - - tests := map[string]struct { - setup func() (request sdk.SignatureRequest, expectedResponse []byte) - verifyStats func(t *testing.T, stats *handlerStats) - err error - }{ - "known 
message": { - setup: func() (request sdk.SignatureRequest, expectedResponse []byte) { - return sdk.SignatureRequest{Message: msg.Bytes()}, signature[:] - }, - verifyStats: func(t *testing.T, stats *handlerStats) { - require.EqualValues(t, 1, stats.messageSignatureRequest.Snapshot().Count()) - require.EqualValues(t, 0, stats.messageSignatureValidationFail.Snapshot().Count()) - require.EqualValues(t, 1, stats.messageSignatureHit.Snapshot().Count()) - require.EqualValues(t, 0, stats.messageSignatureMiss.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureRequest.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureValidationFail.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureHit.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureMiss.Snapshot().Count()) - }, - }, - "offchain message": { - setup: func() (request sdk.SignatureRequest, expectedResponse []byte) { - return sdk.SignatureRequest{Message: offchainMessage.Bytes()}, offchainSignature[:] - }, - verifyStats: func(t *testing.T, stats *handlerStats) { - require.EqualValues(t, 1, stats.messageSignatureRequest.Snapshot().Count()) - require.EqualValues(t, 0, stats.messageSignatureValidationFail.Snapshot().Count()) - require.EqualValues(t, 1, stats.messageSignatureHit.Snapshot().Count()) - require.EqualValues(t, 0, stats.messageSignatureMiss.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureRequest.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureValidationFail.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureHit.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureMiss.Snapshot().Count()) - }, - }, - "unknown message": { - setup: func() (request sdk.SignatureRequest, expectedResponse []byte) { - return sdk.SignatureRequest{Message: unknownMessage.Bytes()}, nil - }, - verifyStats: func(t *testing.T, stats *handlerStats) { - require.EqualValues(t, 0, stats.messageSignatureRequest.Snapshot().Count()) - require.EqualValues(t, 1, stats.messageSignatureValidationFail.Snapshot().Count()) - require.EqualValues(t, 0, stats.messageSignatureHit.Snapshot().Count()) - require.EqualValues(t, 0, stats.messageSignatureMiss.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureRequest.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureValidationFail.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureHit.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureMiss.Snapshot().Count()) - }, - err: &common.AppError{Code: ValidateErrCode}, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - signerVerifier := NewSignerVerifier(backend, message.Codec) - handler := acp118.NewHandler(signerVerifier, signerVerifier) - - request, expectedResponse := test.setup() - requestBytes, err := proto.Marshal(&request) - require.NoError(t, err) - responseBytes, appErr := handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, requestBytes) - if test.err != nil { - require.ErrorIs(t, appErr, test.err) - } else { - require.Nil(t, appErr) - } - - test.verifyStats(t, signerVerifier.(*p2pSignerVerifier).stats) - - // If the expected response is empty, assert that the handler returns an empty response and return early. 
- if len(expectedResponse) == 0 { - require.Len(t, responseBytes, 0, "expected response to be empty") - return - } - var response sdk.SignatureResponse - err = proto.Unmarshal(responseBytes, &response) - require.NoError(t, err, "error unmarshalling SignatureResponse") - - require.Equal(t, expectedResponse, response.Signature) - }) - } -} - -func TestBlockSignatures(t *testing.T) { - database := memdb.New() - snowCtx := utils.TestSnowContext() - blsSecretKey, err := bls.NewSecretKey() - require.NoError(t, err) - - warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) - blkID := ids.GenerateTestID() - blockClient := warptest.MakeBlockClient(blkID) - backend, err := warp.NewBackend( - snowCtx.NetworkID, - snowCtx.ChainID, - warpSigner, - blockClient, - database, - 100, - nil, - ) - require.NoError(t, err) - - signature, err := backend.GetBlockSignature(blkID) - require.NoError(t, err) - unknownBlockID := ids.GenerateTestID() - - toMessageBytes := func(id ids.ID) []byte { - idPayload, err := payload.NewHash(id) - require.NoError(t, err) - - msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, idPayload.Bytes()) - require.NoError(t, err) - - return msg.Bytes() - } - - tests := map[string]struct { - setup func() (request sdk.SignatureRequest, expectedResponse []byte) - verifyStats func(t *testing.T, stats *handlerStats) - err error - }{ - "known block": { - setup: func() (request sdk.SignatureRequest, expectedResponse []byte) { - return sdk.SignatureRequest{Message: toMessageBytes(blkID)}, signature[:] - }, - verifyStats: func(t *testing.T, stats *handlerStats) { - require.EqualValues(t, 0, stats.messageSignatureRequest.Snapshot().Count()) - require.EqualValues(t, 0, stats.messageSignatureValidationFail.Snapshot().Count()) - require.EqualValues(t, 0, stats.messageSignatureHit.Snapshot().Count()) - require.EqualValues(t, 0, stats.messageSignatureMiss.Snapshot().Count()) - require.EqualValues(t, 1, stats.blockSignatureRequest.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureValidationFail.Snapshot().Count()) - require.EqualValues(t, 1, stats.blockSignatureHit.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureMiss.Snapshot().Count()) - }, - }, - "unknown block": { - setup: func() (request sdk.SignatureRequest, expectedResponse []byte) { - return sdk.SignatureRequest{Message: toMessageBytes(unknownBlockID)}, nil - }, - verifyStats: func(t *testing.T, stats *handlerStats) { - require.EqualValues(t, 0, stats.messageSignatureRequest.Snapshot().Count()) - require.EqualValues(t, 0, stats.messageSignatureValidationFail.Snapshot().Count()) - require.EqualValues(t, 0, stats.messageSignatureHit.Snapshot().Count()) - require.EqualValues(t, 0, stats.messageSignatureMiss.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureRequest.Snapshot().Count()) - require.EqualValues(t, 1, stats.blockSignatureValidationFail.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureHit.Snapshot().Count()) - require.EqualValues(t, 0, stats.blockSignatureMiss.Snapshot().Count()) - }, - err: &common.AppError{Code: ValidateErrCode}, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - signerVerifier := NewSignerVerifier(backend, message.Codec) - handler := acp118.NewHandler(signerVerifier, signerVerifier) - - request, expectedResponse := test.setup() - requestBytes, err := proto.Marshal(&request) - require.NoError(t, err) - responseBytes, appErr := 
handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, requestBytes) - if test.err != nil { - require.ErrorIs(t, appErr, test.err) - } else { - require.Nil(t, appErr) - } - - test.verifyStats(t, signerVerifier.(*p2pSignerVerifier).stats) - - // If the expected response is empty, assert that the handler returns an empty response and return early. - if len(expectedResponse) == 0 { - require.Len(t, responseBytes, 0, "expected response to be empty") - return - } - var response sdk.SignatureResponse - err = proto.Unmarshal(responseBytes, &response) - require.NoError(t, err, "error unmarshalling SignatureResponse") - - require.Equal(t, expectedResponse, response.Signature) - }) - } -} diff --git a/warp/handlers/stats.go b/warp/handlers/stats.go index 545013fcd3..1c7a854e97 100644 --- a/warp/handlers/stats.go +++ b/warp/handlers/stats.go @@ -12,13 +12,11 @@ import ( type handlerStats struct { // MessageSignatureRequestHandler metrics messageSignatureRequest metrics.Counter - messageSignatureValidationFail metrics.Counter messageSignatureHit metrics.Counter messageSignatureMiss metrics.Counter messageSignatureRequestDuration metrics.Gauge // BlockSignatureRequestHandler metrics blockSignatureRequest metrics.Counter - blockSignatureValidationFail metrics.Counter blockSignatureHit metrics.Counter blockSignatureMiss metrics.Counter blockSignatureRequestDuration metrics.Gauge @@ -27,12 +25,10 @@ type handlerStats struct { func newStats() *handlerStats { return &handlerStats{ messageSignatureRequest: metrics.NewRegisteredCounter("message_signature_request_count", nil), - messageSignatureValidationFail: metrics.NewRegisteredCounter("message_signature_request_validation_fail", nil), messageSignatureHit: metrics.NewRegisteredCounter("message_signature_request_hit", nil), messageSignatureMiss: metrics.NewRegisteredCounter("message_signature_request_miss", nil), messageSignatureRequestDuration: metrics.NewRegisteredGauge("message_signature_request_duration", nil), blockSignatureRequest: metrics.NewRegisteredCounter("block_signature_request_count", nil), - blockSignatureValidationFail: metrics.NewRegisteredCounter("block_signature_request_validation_fail", nil), blockSignatureHit: metrics.NewRegisteredCounter("block_signature_request_hit", nil), blockSignatureMiss: metrics.NewRegisteredCounter("block_signature_request_miss", nil), blockSignatureRequestDuration: metrics.NewRegisteredGauge("block_signature_request_duration", nil), @@ -40,20 +36,14 @@ func newStats() *handlerStats { } func (h *handlerStats) IncMessageSignatureRequest() { h.messageSignatureRequest.Inc(1) } -func (h *handlerStats) IncMessageSignatureValidationFail() { - h.messageSignatureValidationFail.Inc(1) -} -func (h *handlerStats) IncMessageSignatureHit() { h.messageSignatureHit.Inc(1) } -func (h *handlerStats) IncMessageSignatureMiss() { h.messageSignatureMiss.Inc(1) } +func (h *handlerStats) IncMessageSignatureHit() { h.messageSignatureHit.Inc(1) } +func (h *handlerStats) IncMessageSignatureMiss() { h.messageSignatureMiss.Inc(1) } func (h *handlerStats) UpdateMessageSignatureRequestTime(duration time.Duration) { h.messageSignatureRequestDuration.Inc(int64(duration)) } func (h *handlerStats) IncBlockSignatureRequest() { h.blockSignatureRequest.Inc(1) } -func (h *handlerStats) IncBlockSignatureValidationFail() { - h.blockSignatureValidationFail.Inc(1) -} -func (h *handlerStats) IncBlockSignatureHit() { h.blockSignatureHit.Inc(1) } -func (h *handlerStats) IncBlockSignatureMiss() { h.blockSignatureMiss.Inc(1) } +func 
(h *handlerStats) IncBlockSignatureHit() { h.blockSignatureHit.Inc(1) } +func (h *handlerStats) IncBlockSignatureMiss() { h.blockSignatureMiss.Inc(1) } func (h *handlerStats) UpdateBlockSignatureRequestTime(duration time.Duration) { h.blockSignatureRequestDuration.Inc(int64(duration)) } diff --git a/warp/messages/payload.go b/warp/messages/payload.go index 3776a1356d..facf54524d 100644 --- a/warp/messages/payload.go +++ b/warp/messages/payload.go @@ -20,12 +20,6 @@ type Payload interface { initialize(b []byte) } -// Signable is an optional interface that payloads can implement to allow -// on-the-fly signing of incoming messages by the warp backend. -type Signable interface { - VerifyMesssage(sourceAddress []byte) error -} - func Parse(bytes []byte) (Payload, error) { var payload Payload if _, err := Codec.Unmarshal(bytes, &payload); err != nil { diff --git a/warp/stats.go b/warp/stats.go new file mode 100644 index 0000000000..7135670f13 --- /dev/null +++ b/warp/stats.go @@ -0,0 +1,36 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package warp + +import ( + "github.com/ava-labs/subnet-evm/metrics" +) + +type verifierStats struct { + messageParseFail metrics.Counter + // AddressedCall metrics + addressedCallSignatureValidationFail metrics.Counter + // BlockRequest metrics + blockSignatureValidationFail metrics.Counter +} + +func newVerifierStats() *verifierStats { + return &verifierStats{ + messageParseFail: metrics.NewRegisteredCounter("message_parse_fail", nil), + addressedCallSignatureValidationFail: metrics.NewRegisteredCounter("addressed_call_signature_validation_fail", nil), + blockSignatureValidationFail: metrics.NewRegisteredCounter("block_signature_validation_fail", nil), + } +} + +func (h *verifierStats) IncAddressedCallSignatureValidationFail() { + h.addressedCallSignatureValidationFail.Inc(1) +} + +func (h *verifierStats) IncBlockSignatureValidationFail() { + h.blockSignatureValidationFail.Inc(1) +} + +func (h *verifierStats) IncMessageParseFail() { + h.messageParseFail.Inc(1) +} diff --git a/warp/verifier_backend.go b/warp/verifier_backend.go new file mode 100644 index 0000000000..64249bbba6 --- /dev/null +++ b/warp/verifier_backend.go @@ -0,0 +1,105 @@ +// (c) 2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package warp + +import ( + "context" + "fmt" + + "github.com/ava-labs/subnet-evm/warp/messages" + + "github.com/ava-labs/avalanchego/snow/engine/common" + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" +) + +const ( + ParseErrCode = iota + 1 + VerifyErrCode +) + +// Verify implements the acp118.Verifier interface +func (b *backend) Verify(_ context.Context, unsignedMessage *avalancheWarp.UnsignedMessage, _ []byte) *common.AppError { + if err := b.verifyMessage(unsignedMessage); err != nil { + return err + } + return nil +} + +// verifyMessage verifies the signature of the message +// This is moved to a separate function to avoid having to use a context.Context +func (b *backend) verifyMessage(unsignedMessage *avalancheWarp.UnsignedMessage) *common.AppError { + messageID := unsignedMessage.ID() + // Known on-chain messages should be signed + if _, err := b.GetMessage(messageID); err == nil { + return nil + } + + parsed, err := payload.Parse(unsignedMessage.Payload) + if err != nil { + b.stats.IncMessageParseFail() + return &common.AppError{ + Code: ParseErrCode, + Message: "failed to parse payload: " + err.Error(), + } + } + + switch p := parsed.(type) { + case *payload.AddressedCall: + apperr := b.verifyAddressedCall(p) + if apperr != nil { + b.stats.IncAddressedCallSignatureValidationFail() + return apperr + } + case *payload.Hash: + apperr := b.verifyBlockMessage(p) + if apperr != nil { + b.stats.IncBlockSignatureValidationFail() + return apperr + } + default: + b.stats.IncMessageParseFail() + return &common.AppError{ + Code: ParseErrCode, + Message: fmt.Sprintf("unknown payload type: %T", p), + } + } + return nil +} + +// verifyBlockMessage verifies the block message (payload.Hash) +func (b *backend) verifyBlockMessage(blockHashPayload *payload.Hash) *common.AppError { + blockID := blockHashPayload.Hash + _, err := b.blockClient.GetAcceptedBlock(context.TODO(), blockID) + if err != nil { + return &common.AppError{ + Code: VerifyErrCode, + Message: fmt.Sprintf("failed to get block %s: %s", blockID, err.Error()), + } + } + + return nil +} + +// verifyAddressedCall verifies the addressed call message +func (b *backend) verifyAddressedCall(addressedCall *payload.AddressedCall) *common.AppError { + // Further, parse the payload to see if it is a known type. + parsed, err := messages.Parse(addressedCall.Payload) + if err != nil { + return &common.AppError{ + Code: ParseErrCode, + Message: "failed to parse addressed call message: " + err.Error(), + } + } + + switch p := parsed.(type) { + case *messages.ValidatorUptime: + return nil + default: + return &common.AppError{ + Code: ParseErrCode, + Message: fmt.Sprintf("unknown message type: %T", p), + } + } +} diff --git a/warp/verifier_backend_test.go b/warp/verifier_backend_test.go new file mode 100644 index 0000000000..6b0c120db6 --- /dev/null +++ b/warp/verifier_backend_test.go @@ -0,0 +1,222 @@ +// (c) 2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package warp + +import ( + "context" + "testing" + "time" + + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p/acp118" + "github.com/ava-labs/avalanchego/proto/pb/sdk" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ava-labs/subnet-evm/utils" + "github.com/ava-labs/subnet-evm/warp/warptest" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" +) + +func TestAddressedCallSignatures(t *testing.T) { + database := memdb.New() + snowCtx := utils.TestSnowContext() + blsSecretKey, err := bls.NewSecretKey() + require.NoError(t, err) + warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) + + offChainPayload, err := payload.NewAddressedCall([]byte{1, 2, 3}, []byte{1, 2, 3}) + require.NoError(t, err) + offchainMessage, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, offChainPayload.Bytes()) + require.NoError(t, err) + offchainSignature, err := warpSigner.Sign(offchainMessage) + require.NoError(t, err) + + tests := map[string]struct { + setup func(backend Backend) (request []byte, expectedResponse []byte) + verifyStats func(t *testing.T, stats *verifierStats) + err error + }{ + "known message": { + setup: func(backend Backend) (request []byte, expectedResponse []byte) { + knownPayload, err := payload.NewAddressedCall([]byte{0, 0, 0}, []byte("test")) + require.NoError(t, err) + msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, knownPayload.Bytes()) + require.NoError(t, err) + offchainSignature, err := warpSigner.Sign(msg) + require.NoError(t, err) + + backend.AddMessage(msg) + return msg.Bytes(), offchainSignature[:] + }, + verifyStats: func(t *testing.T, stats *verifierStats) { + require.EqualValues(t, 0, stats.messageParseFail.Snapshot().Count()) + require.EqualValues(t, 0, stats.addressedCallSignatureValidationFail.Snapshot().Count()) + require.EqualValues(t, 0, stats.blockSignatureValidationFail.Snapshot().Count()) + }, + }, + "offchain message": { + setup: func(_ Backend) (request []byte, expectedResponse []byte) { + return offchainMessage.Bytes(), offchainSignature[:] + }, + verifyStats: func(t *testing.T, stats *verifierStats) { + require.EqualValues(t, 0, stats.messageParseFail.Snapshot().Count()) + require.EqualValues(t, 0, stats.addressedCallSignatureValidationFail.Snapshot().Count()) + require.EqualValues(t, 0, stats.blockSignatureValidationFail.Snapshot().Count()) + }, + }, + "unknown message": { + setup: func(_ Backend) (request []byte, expectedResponse []byte) { + unknownPayload, err := payload.NewAddressedCall([]byte{0, 0, 0}, []byte("unknown message")) + require.NoError(t, err) + unknownMessage, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, unknownPayload.Bytes()) + require.NoError(t, err) + return unknownMessage.Bytes(), nil + }, + verifyStats: func(t *testing.T, stats *verifierStats) { + require.EqualValues(t, 0, stats.messageParseFail.Snapshot().Count()) + require.EqualValues(t, 1, 
stats.addressedCallSignatureValidationFail.Snapshot().Count()) + require.EqualValues(t, 0, stats.blockSignatureValidationFail.Snapshot().Count()) + }, + err: &common.AppError{Code: ParseErrCode}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sigCache := &cache.LRU[ids.ID, []byte]{Size: 100} + warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, database, sigCache, [][]byte{offchainMessage.Bytes()}) + require.NoError(t, err) + handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner) + + requestBytes, expectedResponse := test.setup(warpBackend) + protoMsg := &sdk.SignatureRequest{Message: requestBytes} + protoBytes, err := proto.Marshal(protoMsg) + require.NoError(t, err) + responseBytes, appErr := handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, protoBytes) + if test.err != nil { + require.Error(t, appErr) + require.ErrorIs(t, appErr, test.err) + } else { + require.Nil(t, appErr) + } + + test.verifyStats(t, warpBackend.(*backend).stats) + + // If the expected response is empty, assert that the handler returns an empty response and return early. + if len(expectedResponse) == 0 { + require.Len(t, responseBytes, 0, "expected response to be empty") + return + } + response := &sdk.SignatureResponse{} + require.NoError(t, proto.Unmarshal(responseBytes, response)) + require.NoError(t, err, "error unmarshalling SignatureResponse") + + require.Equal(t, expectedResponse, response.Signature) + }) + } +} + +func TestBlockSignatures(t *testing.T) { + database := memdb.New() + snowCtx := utils.TestSnowContext() + blsSecretKey, err := bls.NewSecretKey() + require.NoError(t, err) + + warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) + blkID := ids.GenerateTestID() + blockClient := warptest.MakeBlockClient(blkID) + + unknownBlockID := ids.GenerateTestID() + + toMessageBytes := func(id ids.ID) []byte { + idPayload, err := payload.NewHash(id) + require.NoError(t, err) + + msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, idPayload.Bytes()) + require.NoError(t, err) + + return msg.Bytes() + } + + tests := map[string]struct { + setup func() (request []byte, expectedResponse []byte) + verifyStats func(t *testing.T, stats *verifierStats) + err error + }{ + "known block": { + setup: func() (request []byte, expectedResponse []byte) { + hashPayload, err := payload.NewHash(blkID) + require.NoError(t, err) + unsignedMessage, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, hashPayload.Bytes()) + require.NoError(t, err) + signature, err := warpSigner.Sign(unsignedMessage) + require.NoError(t, err) + return toMessageBytes(blkID), signature[:] + }, + verifyStats: func(t *testing.T, stats *verifierStats) { + require.EqualValues(t, 0, stats.addressedCallSignatureValidationFail.Snapshot().Count()) + require.EqualValues(t, 0, stats.blockSignatureValidationFail.Snapshot().Count()) + require.EqualValues(t, 0, stats.messageParseFail.Snapshot().Count()) + }, + }, + "unknown block": { + setup: func() (request []byte, expectedResponse []byte) { + return toMessageBytes(unknownBlockID), nil + }, + verifyStats: func(t *testing.T, stats *verifierStats) { + require.EqualValues(t, 0, stats.addressedCallSignatureValidationFail.Snapshot().Count()) + require.EqualValues(t, 1, stats.blockSignatureValidationFail.Snapshot().Count()) + require.EqualValues(t, 0, stats.messageParseFail.Snapshot().Count()) + }, + err: 
&common.AppError{Code: VerifyErrCode}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sigCache := &cache.LRU[ids.ID, []byte]{Size: 100} + warpBackend, err := NewBackend( + snowCtx.NetworkID, + snowCtx.ChainID, + warpSigner, + blockClient, + database, + sigCache, + nil, + ) + require.NoError(t, err) + handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner) + + requestBytes, expectedResponse := test.setup() + protoMsg := &sdk.SignatureRequest{Message: requestBytes} + protoBytes, err := proto.Marshal(protoMsg) + require.NoError(t, err) + responseBytes, appErr := handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, protoBytes) + if test.err != nil { + require.NotNil(t, appErr) + require.ErrorIs(t, test.err, appErr) + } else { + require.Nil(t, appErr) + } + + test.verifyStats(t, warpBackend.(*backend).stats) + + // If the expected response is empty, assert that the handler returns an empty response and return early. + if len(expectedResponse) == 0 { + require.Len(t, responseBytes, 0, "expected response to be empty") + return + } + var response sdk.SignatureResponse + err = proto.Unmarshal(responseBytes, &response) + require.NoError(t, err, "error unmarshalling SignatureResponse") + + require.Equal(t, expectedResponse, response.Signature) + }) + } +} diff --git a/warp/wrapped_cache.go b/warp/wrapped_cache.go new file mode 100644 index 0000000000..8d70ac50b2 --- /dev/null +++ b/warp/wrapped_cache.go @@ -0,0 +1,52 @@ +// (c) 2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package warp + +import ( + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/proto/pb/sdk" + "github.com/ethereum/go-ethereum/log" + "google.golang.org/protobuf/proto" +) + +type wrappedCache struct { + cache.Cacher[ids.ID, []byte] +} + +// NewWrappedCache takes a SDK cache that caches SignatureResponses and wraps it +// to return the Signature from the SignatureResponse. 
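[Editorial aside, not part of the patch: a minimal usage sketch of the wrapper introduced below. It assumes the imports already used in this series (avalanchego cache and ids, subnet-evm warp); msgID, sig, and the helper name are placeholders. The same underlying cache can be handed to acp118.NewCachedHandler, which stores proto-marshaled sdk.SignatureResponse bytes, while the backend reads and writes raw signature bytes through the wrapper.]

// wrappedCacheRoundTrip is a hypothetical helper demonstrating the wrapper's behavior.
func wrappedCacheRoundTrip(msgID ids.ID, sig []byte) ([]byte, bool) {
	sdkCache := &cache.LRU[ids.ID, []byte]{Size: 100}
	wrapped := warp.NewWrappedCache(sdkCache)
	// Put marshals the signature into an sdk.SignatureResponse before caching it.
	wrapped.Put(msgID, sig)
	// Get unmarshals the cached response and returns the raw signature bytes.
	return wrapped.Get(msgID)
}

[End of aside; the patch continues below.]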
+func NewWrappedCache(sdkCache cache.Cacher[ids.ID, []byte]) cache.Cacher[ids.ID, []byte] { + return &wrappedCache{ + Cacher: sdkCache, + } +} + +func (w *wrappedCache) Get(key ids.ID) ([]byte, bool) { + responseBytes, ok := w.Cacher.Get(key) + if !ok { + return responseBytes, false + } + response := sdk.SignatureResponse{} + err := proto.Unmarshal(responseBytes, &response) + if err != nil { + log.Error("failed to unmarshal cached SignatureResponse", "error", err) + return nil, false + } + + return response.Signature, true +} + +func (w *wrappedCache) Put(key ids.ID, value []byte) { + response := sdk.SignatureResponse{ + Signature: value, + } + responseBytes, err := proto.Marshal(&response) + if err != nil { + log.Error("failed to marshal SignatureResponse", "error", err) + return + } + + w.Cacher.Put(key, responseBytes) +} From 8851256206d1a63df21fd0d017429daaead8d5fb Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Sun, 13 Oct 2024 14:12:51 +0300 Subject: [PATCH 44/98] prune warp db before backend init --- plugin/evm/vm.go | 14 ++++----- plugin/evm/vm_warp_test.go | 59 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+), 7 deletions(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 02a9420211..977eb2e23b 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -494,6 +494,13 @@ func (vm *VM) Initialize( return fmt.Errorf("failed to create warp signature cache: %w", err) } + // clear warpdb on initialization if config enabled + if vm.config.PruneWarpDB { + if err := database.Clear(vm.warpDB, ethdb.IdealBatchSize); err != nil { + return fmt.Errorf("failed to prune warpDB: %w", err) + } + } + vm.warpBackend, err = warp.NewBackend( vm.ctx.NetworkID, vm.ctx.ChainID, @@ -507,13 +514,6 @@ func (vm *VM) Initialize( return err } - // clear warpdb on initialization if config enabled - if vm.config.PruneWarpDB { - if err := database.Clear(vm.warpDB, ethdb.IdealBatchSize); err != nil { - return fmt.Errorf("failed to prune warpDB: %w", err) - } - } - if err := vm.initializeChain(lastAcceptedHash, vm.ethConfig); err != nil { return err } diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index 0297f6df62..f0b136df22 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -14,6 +14,7 @@ import ( "github.com/ava-labs/avalanchego/ids" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/enginetest" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/snow/validators/validatorstest" @@ -35,6 +36,7 @@ import ( "github.com/ava-labs/subnet-evm/precompile/contracts/warp" "github.com/ava-labs/subnet-evm/predicate" "github.com/ava-labs/subnet-evm/utils" + corewarp "github.com/ava-labs/subnet-evm/warp" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/require" @@ -853,3 +855,60 @@ func TestBlockSignatureRequestsToVM(t *testing.T) { }) } } + +func TestClearWarpDB(t *testing.T) { + ctx, db, genesisBytes, issuer, _ := setupGenesis(t, genesisJSONLatest) + vm := &VM{} + err := vm.Initialize(context.Background(), ctx, db, genesisBytes, []byte{}, []byte{}, issuer, []*commonEng.Fx{}, &enginetest.Sender{}) + require.NoError(t, err) + + 
// use multiple messages to test that all messages get cleared + payloads := [][]byte{[]byte("test1"), []byte("test2"), []byte("test3"), []byte("test4"), []byte("test5")} + messages := []*avalancheWarp.UnsignedMessage{} + + // add all messages + for _, payload := range payloads { + unsignedMsg, err := avalancheWarp.NewUnsignedMessage(vm.ctx.NetworkID, vm.ctx.ChainID, payload) + require.NoError(t, err) + err = vm.warpBackend.AddMessage(unsignedMsg) + require.NoError(t, err) + // ensure that the message was added + _, err = vm.warpBackend.GetMessageSignature(unsignedMsg) + require.NoError(t, err) + messages = append(messages, unsignedMsg) + } + + require.NoError(t, vm.Shutdown(context.Background())) + + // Restart VM with the same database default should not prune the warp db + vm = &VM{} + ctx, _, _, _, _ = setupGenesis(t, genesisJSONLatest) + err = vm.Initialize(context.Background(), ctx, db, genesisBytes, []byte{}, []byte{}, issuer, []*commonEng.Fx{}, &enginetest.Sender{}) + require.NoError(t, err) + + // check messages are still present + for _, message := range messages { + bytes, err := vm.warpBackend.GetMessageSignature(message) + require.NoError(t, err) + require.NotEmpty(t, bytes) + } + + require.NoError(t, vm.Shutdown(context.Background())) + + // restart the VM with pruning enabled + vm = &VM{} + config := `{"prune-warp-db-enabled": true}` + ctx, _, _, _, _ = setupGenesis(t, genesisJSONLatest) + err = vm.Initialize(context.Background(), ctx, db, genesisBytes, []byte{}, []byte(config), issuer, []*commonEng.Fx{}, &enginetest.Sender{}) + require.NoError(t, err) + + it := vm.warpDB.NewIterator() + require.False(t, it.Next()) + it.Release() + + // ensure all messages have been deleted + for _, message := range messages { + _, err := vm.warpBackend.GetMessageSignature(message) + require.ErrorIs(t, err, &commonEng.AppError{Code: corewarp.ParseErrCode}) + } +} From 1a75f6c10c891b70c2ca1086cc8dbc03661701ac Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 14 Oct 2024 14:06:00 +0300 Subject: [PATCH 45/98] add cache tests --- warp/verifier_backend_test.go | 155 +++++++++++++++++++++------------- 1 file changed, 95 insertions(+), 60 deletions(-) diff --git a/warp/verifier_backend_test.go b/warp/verifier_backend_test.go index 6b0c120db6..79d63ce383 100644 --- a/warp/verifier_backend_test.go +++ b/warp/verifier_backend_test.go @@ -88,37 +88,55 @@ func TestAddressedCallSignatures(t *testing.T) { } for name, test := range tests { - t.Run(name, func(t *testing.T) { - sigCache := &cache.LRU[ids.ID, []byte]{Size: 100} - warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, database, sigCache, [][]byte{offchainMessage.Bytes()}) - require.NoError(t, err) - handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner) - - requestBytes, expectedResponse := test.setup(warpBackend) - protoMsg := &sdk.SignatureRequest{Message: requestBytes} - protoBytes, err := proto.Marshal(protoMsg) - require.NoError(t, err) - responseBytes, appErr := handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, protoBytes) - if test.err != nil { - require.Error(t, appErr) - require.ErrorIs(t, appErr, test.err) + for _, withCache := range []bool{true, false} { + if withCache { + name += "_with_cache" } else { - require.Nil(t, appErr) + name += "_no_cache" } + t.Run(name, func(t *testing.T) { + var sigCache cache.Cacher[ids.ID, []byte] + if withCache { + sigCache = &cache.LRU[ids.ID, []byte]{Size: 100} + } else { + sigCache = 
&cache.Empty[ids.ID, []byte]{} + } + warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, database, sigCache, [][]byte{offchainMessage.Bytes()}) + require.NoError(t, err) + handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner) + + requestBytes, expectedResponse := test.setup(warpBackend) + protoMsg := &sdk.SignatureRequest{Message: requestBytes} + protoBytes, err := proto.Marshal(protoMsg) + require.NoError(t, err) + responseBytes, appErr := handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, protoBytes) + if test.err != nil { + require.Error(t, appErr) + require.ErrorIs(t, appErr, test.err) + } else { + require.Nil(t, appErr) + } - test.verifyStats(t, warpBackend.(*backend).stats) + test.verifyStats(t, warpBackend.(*backend).stats) - // If the expected response is empty, assert that the handler returns an empty response and return early. - if len(expectedResponse) == 0 { - require.Len(t, responseBytes, 0, "expected response to be empty") - return - } - response := &sdk.SignatureResponse{} - require.NoError(t, proto.Unmarshal(responseBytes, response)) - require.NoError(t, err, "error unmarshalling SignatureResponse") + // If the expected response is empty, assert that the handler returns an empty response and return early. + if len(expectedResponse) == 0 { + require.Len(t, responseBytes, 0, "expected response to be empty") + return + } + // check cache is populated + if withCache { + require.NotZero(t, warpBackend.(*backend).signatureCache.Len()) + } else { + require.Zero(t, warpBackend.(*backend).signatureCache.Len()) + } + response := &sdk.SignatureResponse{} + require.NoError(t, proto.Unmarshal(responseBytes, response)) + require.NoError(t, err, "error unmarshalling SignatureResponse") - require.Equal(t, expectedResponse, response.Signature) - }) + require.Equal(t, expectedResponse, response.Signature) + }) + } } } @@ -179,44 +197,61 @@ func TestBlockSignatures(t *testing.T) { } for name, test := range tests { - t.Run(name, func(t *testing.T) { - sigCache := &cache.LRU[ids.ID, []byte]{Size: 100} - warpBackend, err := NewBackend( - snowCtx.NetworkID, - snowCtx.ChainID, - warpSigner, - blockClient, - database, - sigCache, - nil, - ) - require.NoError(t, err) - handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner) - - requestBytes, expectedResponse := test.setup() - protoMsg := &sdk.SignatureRequest{Message: requestBytes} - protoBytes, err := proto.Marshal(protoMsg) - require.NoError(t, err) - responseBytes, appErr := handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, protoBytes) - if test.err != nil { - require.NotNil(t, appErr) - require.ErrorIs(t, test.err, appErr) + for _, withCache := range []bool{true, false} { + if withCache { + name += "_with_cache" } else { - require.Nil(t, appErr) + name += "_no_cache" } + t.Run(name, func(t *testing.T) { + var sigCache cache.Cacher[ids.ID, []byte] + if withCache { + sigCache = &cache.LRU[ids.ID, []byte]{Size: 100} + } else { + sigCache = &cache.Empty[ids.ID, []byte]{} + } + warpBackend, err := NewBackend( + snowCtx.NetworkID, + snowCtx.ChainID, + warpSigner, + blockClient, + database, + sigCache, + nil, + ) + require.NoError(t, err) + handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner) - test.verifyStats(t, warpBackend.(*backend).stats) + requestBytes, expectedResponse := test.setup() + protoMsg := &sdk.SignatureRequest{Message: requestBytes} + protoBytes, err := proto.Marshal(protoMsg) + 
require.NoError(t, err) + responseBytes, appErr := handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, protoBytes) + if test.err != nil { + require.NotNil(t, appErr) + require.ErrorIs(t, test.err, appErr) + } else { + require.Nil(t, appErr) + } - // If the expected response is empty, assert that the handler returns an empty response and return early. - if len(expectedResponse) == 0 { - require.Len(t, responseBytes, 0, "expected response to be empty") - return - } - var response sdk.SignatureResponse - err = proto.Unmarshal(responseBytes, &response) - require.NoError(t, err, "error unmarshalling SignatureResponse") + test.verifyStats(t, warpBackend.(*backend).stats) - require.Equal(t, expectedResponse, response.Signature) - }) + // If the expected response is empty, assert that the handler returns an empty response and return early. + if len(expectedResponse) == 0 { + require.Len(t, responseBytes, 0, "expected response to be empty") + return + } + // check cache is populated + if withCache { + require.NotZero(t, warpBackend.(*backend).signatureCache.Len()) + } else { + require.Zero(t, warpBackend.(*backend).signatureCache.Len()) + } + var response sdk.SignatureResponse + err = proto.Unmarshal(responseBytes, &response) + require.NoError(t, err, "error unmarshalling SignatureResponse") + require.Equal(t, expectedResponse, response.Signature) + }) + } } } From 7143fc5e068f75fbcc67e6d393c77cf76159386e Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 14 Oct 2024 14:08:47 +0300 Subject: [PATCH 46/98] remove uptime msg type --- warp/messages/codec.go | 1 - warp/messages/validator_uptime.go | 51 ------------------------------- 2 files changed, 52 deletions(-) delete mode 100644 warp/messages/validator_uptime.go diff --git a/warp/messages/codec.go b/warp/messages/codec.go index 87d2fa334a..ac736cbf67 100644 --- a/warp/messages/codec.go +++ b/warp/messages/codec.go @@ -24,7 +24,6 @@ func init() { lc := linearcodec.NewDefault() err := errors.Join( - lc.RegisterType(&ValidatorUptime{}), Codec.RegisterCodec(CodecVersion, lc), ) if err != nil { diff --git a/warp/messages/validator_uptime.go b/warp/messages/validator_uptime.go deleted file mode 100644 index 3d3e4dd5dd..0000000000 --- a/warp/messages/validator_uptime.go +++ /dev/null @@ -1,51 +0,0 @@ -// (c) 2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package messages - -import ( - "fmt" - - "github.com/ava-labs/avalanchego/ids" -) - -// ValidatorUptime is signed when the ValidationID is known and the validator -// has been up for TotalUptime seconds. -type ValidatorUptime struct { - ValidationID ids.ID `serialize:"true"` - TotalUptime uint64 `serialize:"true"` - - bytes []byte -} - -// NewValidatorUptime creates a new *ValidatorUptime and initializes it. -func NewValidatorUptime(validationID ids.ID, totalUptime uint64) (*ValidatorUptime, error) { - bhp := &ValidatorUptime{ - ValidationID: validationID, - TotalUptime: totalUptime, - } - return bhp, initialize(bhp) -} - -// ParseValidatorUptime converts a slice of bytes into an initialized ValidatorUptime. -func ParseValidatorUptime(b []byte) (*ValidatorUptime, error) { - payloadIntf, err := Parse(b) - if err != nil { - return nil, err - } - payload, ok := payloadIntf.(*ValidatorUptime) - if !ok { - return nil, fmt.Errorf("%w: %T", errWrongType, payloadIntf) - } - return payload, nil -} - -// Bytes returns the binary representation of this payload. 
It assumes that the -// payload is initialized from either NewValidatorUptime or Parse. -func (b *ValidatorUptime) Bytes() []byte { - return b.bytes -} - -func (b *ValidatorUptime) initialize(bytes []byte) { - b.bytes = bytes -} From 243fe5feb5b893412133d3090d823e3ee8455d44 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 14 Oct 2024 14:09:01 +0300 Subject: [PATCH 47/98] add cache test --- warp/verifier_backend.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/warp/verifier_backend.go b/warp/verifier_backend.go index 64249bbba6..d5572071db 100644 --- a/warp/verifier_backend.go +++ b/warp/verifier_backend.go @@ -94,8 +94,6 @@ func (b *backend) verifyAddressedCall(addressedCall *payload.AddressedCall) *com } switch p := parsed.(type) { - case *messages.ValidatorUptime: - return nil default: return &common.AppError{ Code: ParseErrCode, From 02eb89bb3651c42ad6a976dc6bdf5a704ea27ec6 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 14 Oct 2024 14:37:23 +0300 Subject: [PATCH 48/98] fix linter --- warp/messages/payload.go | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/warp/messages/payload.go b/warp/messages/payload.go index facf54524d..bea796f1dd 100644 --- a/warp/messages/payload.go +++ b/warp/messages/payload.go @@ -3,13 +3,6 @@ package messages -import ( - "errors" - "fmt" -) - -var errWrongType = errors.New("wrong payload type") - // Payload provides a common interface for all payloads implemented by this // package. type Payload interface { @@ -28,12 +21,3 @@ func Parse(bytes []byte) (Payload, error) { payload.initialize(bytes) return payload, nil } - -func initialize(p Payload) error { - bytes, err := Codec.Marshal(CodecVersion, &p) - if err != nil { - return fmt.Errorf("couldn't marshal %T payload: %w", p, err) - } - p.initialize(bytes) - return nil -} From aced0034d51805e5f53131ba66221248b386d152 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 14 Oct 2024 15:53:19 +0300 Subject: [PATCH 49/98] add validator uptimes --- warp/messages/codec.go | 1 + warp/messages/payload.go | 22 +++++++++++++ warp/messages/validator_uptime.go | 51 +++++++++++++++++++++++++++++++ 3 files changed, 74 insertions(+) create mode 100644 warp/messages/validator_uptime.go diff --git a/warp/messages/codec.go b/warp/messages/codec.go index ac736cbf67..87d2fa334a 100644 --- a/warp/messages/codec.go +++ b/warp/messages/codec.go @@ -24,6 +24,7 @@ func init() { lc := linearcodec.NewDefault() err := errors.Join( + lc.RegisterType(&ValidatorUptime{}), Codec.RegisterCodec(CodecVersion, lc), ) if err != nil { diff --git a/warp/messages/payload.go b/warp/messages/payload.go index bea796f1dd..3776a1356d 100644 --- a/warp/messages/payload.go +++ b/warp/messages/payload.go @@ -3,6 +3,13 @@ package messages +import ( + "errors" + "fmt" +) + +var errWrongType = errors.New("wrong payload type") + // Payload provides a common interface for all payloads implemented by this // package. type Payload interface { @@ -13,6 +20,12 @@ type Payload interface { initialize(b []byte) } +// Signable is an optional interface that payloads can implement to allow +// on-the-fly signing of incoming messages by the warp backend. 
+type Signable interface { + VerifyMesssage(sourceAddress []byte) error +} + func Parse(bytes []byte) (Payload, error) { var payload Payload if _, err := Codec.Unmarshal(bytes, &payload); err != nil { @@ -21,3 +34,12 @@ func Parse(bytes []byte) (Payload, error) { payload.initialize(bytes) return payload, nil } + +func initialize(p Payload) error { + bytes, err := Codec.Marshal(CodecVersion, &p) + if err != nil { + return fmt.Errorf("couldn't marshal %T payload: %w", p, err) + } + p.initialize(bytes) + return nil +} diff --git a/warp/messages/validator_uptime.go b/warp/messages/validator_uptime.go new file mode 100644 index 0000000000..3d3e4dd5dd --- /dev/null +++ b/warp/messages/validator_uptime.go @@ -0,0 +1,51 @@ +// (c) 2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package messages + +import ( + "fmt" + + "github.com/ava-labs/avalanchego/ids" +) + +// ValidatorUptime is signed when the ValidationID is known and the validator +// has been up for TotalUptime seconds. +type ValidatorUptime struct { + ValidationID ids.ID `serialize:"true"` + TotalUptime uint64 `serialize:"true"` + + bytes []byte +} + +// NewValidatorUptime creates a new *ValidatorUptime and initializes it. +func NewValidatorUptime(validationID ids.ID, totalUptime uint64) (*ValidatorUptime, error) { + bhp := &ValidatorUptime{ + ValidationID: validationID, + TotalUptime: totalUptime, + } + return bhp, initialize(bhp) +} + +// ParseValidatorUptime converts a slice of bytes into an initialized ValidatorUptime. +func ParseValidatorUptime(b []byte) (*ValidatorUptime, error) { + payloadIntf, err := Parse(b) + if err != nil { + return nil, err + } + payload, ok := payloadIntf.(*ValidatorUptime) + if !ok { + return nil, fmt.Errorf("%w: %T", errWrongType, payloadIntf) + } + return payload, nil +} + +// Bytes returns the binary representation of this payload. It assumes that the +// payload is initialized from either NewValidatorUptime or Parse. 
+func (b *ValidatorUptime) Bytes() []byte { + return b.bytes +} + +func (b *ValidatorUptime) initialize(bytes []byte) { + b.bytes = bytes +} From 1e46030728adcf9c8da1ad16d1a0cf67b2f7490a Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 14 Oct 2024 16:31:51 +0300 Subject: [PATCH 50/98] bump avago getcurrentvalidators branch --- go.mod | 2 +- go.sum | 8 ++------ scripts/versions.sh | 2 +- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 279bf3a144..4d3adb1409 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22.8 require ( github.com/VictoriaMetrics/fastcache v1.12.1 github.com/antithesishq/antithesis-sdk-go v0.3.8 - github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240920150211-07af6b2fbe17 + github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20241014132225-a2f59c8ccfbb github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 393a762a3b..95cfa288b9 100644 --- a/go.sum +++ b/go.sum @@ -58,12 +58,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax github.com/antithesishq/antithesis-sdk-go v0.3.8 h1:OvGoHxIcOXFJLyn9IJQ5DzByZ3YVAWNBc394ObzDRb8= github.com/antithesishq/antithesis-sdk-go v0.3.8/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240919204453-a754e44c1795 h1:zxqtKkAuU70XQlr3Pz6RQDvopXBqGDu2y2PshXe507U= -github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240919204453-a754e44c1795/go.mod h1:YzHJbHAJOlRLwG1pxWk4uAI7nvV4cxpgQL1FSAx/H4Y= -github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240920144520-3adf6d4c0f62 h1:H/8gjHFcYDC02oSDehRQW3s89cIV6aM2u7WfxMuxlNU= -github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240920144520-3adf6d4c0f62/go.mod h1:YzHJbHAJOlRLwG1pxWk4uAI7nvV4cxpgQL1FSAx/H4Y= -github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240920150211-07af6b2fbe17 h1:nc/U63uqdIikeiNexGe0PIOwSOWcRqmuOWuBxqJ7B3Y= -github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20240920150211-07af6b2fbe17/go.mod h1:YzHJbHAJOlRLwG1pxWk4uAI7nvV4cxpgQL1FSAx/H4Y= +github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20241014132225-a2f59c8ccfbb h1:jMBexwAgUdwJ/IKzF+QvKMVeTQM0C2yfDvHiKc2EV78= +github.com/ava-labs/avalanchego v1.11.12-rc.2.0.20241014132225-a2f59c8ccfbb/go.mod h1:qSHmog3wMVjo/ruIAQo0ppXAilyni07NIu5K88RyhWE= github.com/ava-labs/coreth v0.13.8 h1:f14X3KgwHl9LwzfxlN6S4bbn5VA2rhEsNnHaRLSTo/8= github.com/ava-labs/coreth v0.13.8/go.mod h1:t3BSv/eQv0AlDPMfEDCMMoD/jq1RkUsbFzQAFg5qBcE= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= diff --git a/scripts/versions.sh b/scripts/versions.sh index d5783f2838..78fe44ef2e 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -4,7 +4,7 @@ # shellcheck disable=SC2034 # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'43d5b435'} +AVALANCHE_VERSION=${AVALANCHE_VERSION:-'a2f59c8c'} 
GINKGO_VERSION=${GINKGO_VERSION:-'v2.2.0'} # This won't be used, but it's here to make code syncs easier From 6133246a2091d3281b06750078309bbc1c74a4ac Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 15 Oct 2024 16:21:53 +0300 Subject: [PATCH 51/98] rename get validator IDs to NodeIDs --- plugin/evm/service.go | 2 +- plugin/evm/validators/state.go | 8 ++++---- plugin/evm/vm.go | 4 ++-- plugin/evm/vm_validators_state_test.go | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/plugin/evm/service.go b/plugin/evm/service.go index a7b0339599..25c0eeaf22 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -40,7 +40,7 @@ func (api *ValidatorsAPI) GetCurrentValidators(_ *http.Request, args *GetCurrent nodeIDs := set.Of(args.NodeIDs...) if nodeIDs.Len() == 0 { - nodeIDs = api.vm.validatorState.GetValidatorIDs() + nodeIDs = api.vm.validatorState.GetNodeIDs() } reply.Validators = make([]CurrentValidator, 0, nodeIDs.Len()) diff --git a/plugin/evm/validators/state.go b/plugin/evm/validators/state.go index 3ea7711bae..d63b4eeb20 100644 --- a/plugin/evm/validators/state.go +++ b/plugin/evm/validators/state.go @@ -40,8 +40,8 @@ type State interface { // GetValidationIDs returns the validation IDs in the state GetValidationIDs() set.Set[ids.ID] - // GetValidatorIDs returns the validator node IDs in the state - GetValidatorIDs() set.Set[ids.NodeID] + // GetNodeIDs returns the validator node IDs in the state + GetNodeIDs() set.Set[ids.NodeID] // GetValidator returns the validator data for the given nodeID GetValidator(nodeID ids.NodeID) (*ValidatorOutput, error) @@ -241,8 +241,8 @@ func (s *state) GetValidationIDs() set.Set[ids.ID] { return ids } -// GetValidatorIDs returns the validator IDs in the state -func (s *state) GetValidatorIDs() set.Set[ids.NodeID] { +// GetNodeIDs returns the validator IDs in the state +func (s *state) GetNodeIDs() set.Set[ids.NodeID] { ids := set.NewSet[ids.NodeID](len(s.index)) for nodeID := range s.index { ids.Add(nodeID) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 4214a1dc05..26b83f6039 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -725,7 +725,7 @@ func (vm *VM) onNormalOperationsStarted() error { if err := vm.performValidatorUpdate(ctx); err != nil { return fmt.Errorf("failed to update validators: %w", err) } - vdrIDs := vm.validatorState.GetValidatorIDs().List() + vdrIDs := vm.validatorState.GetNodeIDs().List() // then start tracking with updated validators if err := vm.uptimeManager.StartTracking(vdrIDs); err != nil { return fmt.Errorf("failed to start tracking uptime: %w", err) @@ -866,7 +866,7 @@ func (vm *VM) Shutdown(context.Context) error { vm.cancel() } if vm.bootstrapped.Get() { - vdrIDs := vm.validatorState.GetValidatorIDs().List() + vdrIDs := vm.validatorState.GetNodeIDs().List() if err := vm.uptimeManager.StopTracking(vdrIDs); err != nil { return fmt.Errorf("failed to stop tracking uptime: %w", err) } diff --git a/plugin/evm/vm_validators_state_test.go b/plugin/evm/vm_validators_state_test.go index 5a402441b8..15dfa538c3 100644 --- a/plugin/evm/vm_validators_state_test.go +++ b/plugin/evm/vm_validators_state_test.go @@ -152,7 +152,7 @@ func TestValidatorState(t *testing.T) { // new validator should be added to the state eventually after validatorsLoadFrequency require.EventuallyWithT(func(c *assert.CollectT) { - assert.Equal(c, 4, vm.validatorState.GetValidatorIDs().Len()) + assert.Equal(c, 4, vm.validatorState.GetNodeIDs().Len()) newValidator, err := vm.validatorState.GetValidator(newNodeID) 
assert.NoError(c, err) assert.Equal(c, newNodeID, newValidator.NodeID) From 72bba65d01951be07a0e4004d99e647db592fa90 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 15 Oct 2024 23:43:54 +0300 Subject: [PATCH 52/98] sign uptime warp msg base on uptime calculator --- plugin/evm/validators/locked_state_reader.go | 55 ++++++++++++++++++ plugin/evm/validators/noop_state.go | 59 ++++++++++++++++++++ plugin/evm/validators/state.go | 32 ++++++++--- plugin/evm/vm.go | 7 ++- warp/backend.go | 18 ++++-- warp/backend_test.go | 12 ++-- warp/handlers/signature_request_test.go | 6 +- warp/verifier_backend.go | 32 +++++++++++ warp/verifier_backend_test.go | 6 +- 9 files changed, 205 insertions(+), 22 deletions(-) create mode 100644 plugin/evm/validators/locked_state_reader.go create mode 100644 plugin/evm/validators/noop_state.go diff --git a/plugin/evm/validators/locked_state_reader.go b/plugin/evm/validators/locked_state_reader.go new file mode 100644 index 0000000000..022c7e9507 --- /dev/null +++ b/plugin/evm/validators/locked_state_reader.go @@ -0,0 +1,55 @@ +package validators + +import ( + "sync" + + ids "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) + +type lockedStateReader struct { + lock sync.Locker + s StateReader +} + +func NewLockedStateReader(lock sync.Locker, s State) StateReader { + return &lockedStateReader{ + lock: lock, + s: s, + } +} + +func (s *lockedStateReader) GetStatus(vID ids.ID) (bool, error) { + s.lock.Lock() + defer s.lock.Unlock() + + return s.s.GetStatus(vID) +} + +func (s *lockedStateReader) GetValidationIDs() set.Set[ids.ID] { + s.lock.Lock() + defer s.lock.Unlock() + + return s.s.GetValidationIDs() +} + +func (s *lockedStateReader) GetNodeIDs() set.Set[ids.NodeID] { + s.lock.Lock() + defer s.lock.Unlock() + + return s.s.GetNodeIDs() +} + +func (s *lockedStateReader) GetValidator(nodeID ids.NodeID) (*ValidatorOutput, error) { + s.lock.Lock() + defer s.lock.Unlock() + + return s.s.GetValidator(nodeID) +} + +func (s *lockedStateReader) GetNodeID(vID ids.ID) (ids.NodeID, error) { + s.lock.Lock() + defer s.lock.Unlock() + + return s.s.GetNodeID(vID) +} diff --git a/plugin/evm/validators/noop_state.go b/plugin/evm/validators/noop_state.go new file mode 100644 index 0000000000..822f9be4a0 --- /dev/null +++ b/plugin/evm/validators/noop_state.go @@ -0,0 +1,59 @@ +package validators + +import ( + "time" + + ids "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) + +var NoOpState State = &noOpState{} + +type noOpState struct{} + +func (n *noOpState) GetStatus(vID ids.ID) (bool, error) { return false, nil } + +func (n *noOpState) GetValidationIDs() set.Set[ids.ID] { return set.NewSet[ids.ID](0) } + +func (n *noOpState) GetNodeIDs() set.Set[ids.NodeID] { return set.NewSet[ids.NodeID](0) } + +func (n *noOpState) GetValidator(nodeID ids.NodeID) (*ValidatorOutput, error) { + return nil, nil +} + +func (n *noOpState) GetNodeID(vID ids.ID) (ids.NodeID, error) { return ids.NodeID{}, nil } + +func (n *noOpState) GetSubnetID(chainID ids.ID) (ids.ID, error) { return ids.ID{}, nil } + +func (n *noOpState) GetUptime( + nodeID ids.NodeID, +) (upDuration time.Duration, lastUpdated time.Time, err error) { + return 0, time.Time{}, nil +} + +func (n *noOpState) SetUptime( + nodeID ids.NodeID, + upDuration time.Duration, + lastUpdated time.Time, +) error { + return nil +} + +func (n *noOpState) GetStartTime( + nodeID ids.NodeID, +) (startTime time.Time, 
err error) { + return time.Time{}, nil +} + +func (n *noOpState) AddValidator(vID ids.ID, nodeID ids.NodeID, startTimestamp uint64, isActive bool) error { + return nil +} + +func (n *noOpState) DeleteValidator(vID ids.ID) error { + return nil +} +func (n *noOpState) WriteState() error { return nil } + +func (n *noOpState) SetStatus(vID ids.ID, isActive bool) error { return nil } + +func (n *noOpState) RegisterListener(StateCallbackListener) {} diff --git a/plugin/evm/validators/state.go b/plugin/evm/validators/state.go index d63b4eeb20..135a4b7f7d 100644 --- a/plugin/evm/validators/state.go +++ b/plugin/evm/validators/state.go @@ -24,8 +24,22 @@ const ( deleted dbUpdateStatus = false ) +type StateReader interface { + // GetStatus returns the active status of the validator with the given vID + GetStatus(vID ids.ID) (bool, error) + // GetValidationIDs returns the validation IDs in the state + GetValidationIDs() set.Set[ids.ID] + // GetNodeIDs returns the validator node IDs in the state + GetNodeIDs() set.Set[ids.NodeID] + // GetValidator returns the validator data for the given nodeID + GetValidator(nodeID ids.NodeID) (*ValidatorOutput, error) + // GetNodeID returns the node ID for the given validation ID + GetNodeID(vID ids.ID) (ids.NodeID, error) +} + type State interface { uptime.State + StateReader // AddValidator adds a new validator to the state AddValidator(vID ids.ID, nodeID ids.NodeID, startTimestamp uint64, isActive bool) error // DeleteValidator deletes the validator from the state @@ -35,15 +49,6 @@ type State interface { // SetStatus sets the active status of the validator with the given vID SetStatus(vID ids.ID, isActive bool) error - // GetStatus returns the active status of the validator with the given vID - GetStatus(vID ids.ID) (bool, error) - - // GetValidationIDs returns the validation IDs in the state - GetValidationIDs() set.Set[ids.ID] - // GetNodeIDs returns the validator node IDs in the state - GetNodeIDs() set.Set[ids.NodeID] - // GetValidator returns the validator data for the given nodeID - GetValidator(nodeID ids.NodeID) (*ValidatorOutput, error) // RegisterListener registers a listener to the state RegisterListener(StateCallbackListener) @@ -264,6 +269,15 @@ func (s *state) GetValidator(nodeID ids.NodeID) (*ValidatorOutput, error) { }, nil } +// GetNodeID returns the node ID for the given validation ID +func (s *state) GetNodeID(vID ids.ID) (ids.NodeID, error) { + data, exists := s.data[vID] + if !exists { + return ids.NodeID{}, database.ErrNotFound + } + return data.NodeID, nil +} + // RegisterListener registers a listener to the state // OnValidatorAdded is called for all current validators on the provided listener before this function returns func (s *state) RegisterListener(listener StateCallbackListener) { diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 26b83f6039..9d0c914a2d 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -250,7 +250,8 @@ type VM struct { ethTxPushGossiper avalancheUtils.Atomic[*gossip.PushGossiper[*GossipEthTx]] ethTxPullGossiper gossip.Gossiper - uptimeManager uptime.PausableManager + UptimeLockedCalculator avalancheUptime.LockedCalculator + uptimeManager uptime.PausableManager validatorState validators.State } @@ -492,6 +493,8 @@ func (vm *VM) Initialize( } // TODO: add a configuration to disable tracking uptime vm.uptimeManager = uptime.NewPausableManager(avalancheUptime.NewManager(vm.validatorState, &vm.clock)) + vm.UptimeLockedCalculator = avalancheUptime.NewLockedCalculator() + 
vm.UptimeLockedCalculator.SetCalculator(&vm.bootstrapped, &chainCtx.Lock, vm.uptimeManager) vm.validatorState.RegisterListener(vm.uptimeManager) // Initialize warp backend @@ -522,6 +525,8 @@ func (vm *VM) Initialize( vm.ctx.ChainID, vm.ctx.WarpSigner, vm, + vm.UptimeLockedCalculator, + validators.NewLockedStateReader(vm.ctx.Lock.RLocker(), vm.validatorState), vm.warpDB, meteredCache, offchainWarpMessages, diff --git a/warp/backend.go b/warp/backend.go index 8d60fee41e..4afa89a09b 100644 --- a/warp/backend.go +++ b/warp/backend.go @@ -13,8 +13,10 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p/acp118" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/uptime" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ava-labs/subnet-evm/plugin/evm/validators" "github.com/ethereum/go-ethereum/log" ) @@ -56,6 +58,8 @@ type backend struct { db database.Database warpSigner avalancheWarp.Signer blockClient BlockClient + uptimeCalculator uptime.Calculator + validatorState validators.StateReader signatureCache cache.Cacher[ids.ID, []byte] messageCache *cache.LRU[ids.ID, *avalancheWarp.UnsignedMessage] offchainAddressedCallMsgs map[ids.ID]*avalancheWarp.UnsignedMessage @@ -68,16 +72,20 @@ func NewBackend( sourceChainID ids.ID, warpSigner avalancheWarp.Signer, blockClient BlockClient, + uptimeCalculator uptime.Calculator, + validatorsState validators.StateReader, db database.Database, sdkCache cache.Cacher[ids.ID, []byte], offchainMessages [][]byte, ) (Backend, error) { b := &backend{ - networkID: networkID, - sourceChainID: sourceChainID, - db: db, - warpSigner: warpSigner, - blockClient: blockClient, + networkID: networkID, + sourceChainID: sourceChainID, + db: db, + warpSigner: warpSigner, + blockClient: blockClient, + uptimeCalculator: uptimeCalculator, + validatorState: validatorsState, // sdkCache returns sdk.SignatureResponse proto bytes, // and it must be wrapped to return Signature bytes. 
signatureCache: NewWrappedCache(sdkCache), diff --git a/warp/backend_test.go b/warp/backend_test.go index 91129bd104..d312941dfd 100644 --- a/warp/backend_test.go +++ b/warp/backend_test.go @@ -9,10 +9,12 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ava-labs/subnet-evm/plugin/evm/validators" "github.com/ava-labs/subnet-evm/warp/warptest" "github.com/stretchr/testify/require" ) @@ -43,7 +45,7 @@ func TestAddAndGetValidMessage(t *testing.T) { require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, messageSignatureCache, nil) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, uptime.NoOpCalculator, validators.NoOpState, db, messageSignatureCache, nil) require.NoError(t, err) // Add testUnsignedMessage to the warp backend @@ -66,7 +68,7 @@ func TestAddAndGetUnknownMessage(t *testing.T) { require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, messageSignatureCache, nil) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, uptime.NoOpCalculator, validators.NoOpState, db, messageSignatureCache, nil) require.NoError(t, err) // Try getting a signature for a message that was not added. @@ -85,7 +87,7 @@ func TestGetBlockSignature(t *testing.T) { require.NoError(err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) messageSignatureCache := &cache.LRU[ids.ID, []byte]{} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, blockClient, db, messageSignatureCache, nil) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, blockClient, uptime.NoOpCalculator, validators.NoOpState, db, messageSignatureCache, nil) require.NoError(err) blockHashPayload, err := payload.NewHash(blkID) @@ -112,7 +114,7 @@ func TestZeroSizedCache(t *testing.T) { // Verify zero sized cache works normally, because the lru cache will be initialized to size 1 for any size parameter <= 0. 
messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 0} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, messageSignatureCache, nil) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, uptime.NoOpCalculator, validators.NoOpState, db, messageSignatureCache, nil) require.NoError(t, err) // Add testUnsignedMessage to the warp backend @@ -166,7 +168,7 @@ func TestOffChainMessages(t *testing.T) { db := memdb.New() messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, db, messageSignatureCache, test.offchainMessages) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, uptime.NoOpCalculator, validators.NoOpState, db, messageSignatureCache, test.offchainMessages) require.ErrorIs(err, test.err) if test.check != nil { test.check(require, backend) diff --git a/warp/handlers/signature_request_test.go b/warp/handlers/signature_request_test.go index 8320b9cea1..bc23a72967 100644 --- a/warp/handlers/signature_request_test.go +++ b/warp/handlers/signature_request_test.go @@ -10,10 +10,12 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/utils/crypto/bls" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/subnet-evm/plugin/evm/message" + "github.com/ava-labs/subnet-evm/plugin/evm/validators" "github.com/ava-labs/subnet-evm/utils" "github.com/ava-labs/subnet-evm/warp" "github.com/ava-labs/subnet-evm/warp/warptest" @@ -33,7 +35,7 @@ func TestMessageSignatureHandler(t *testing.T) { require.NoError(t, err) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 100} - backend, err := warp.NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, database, messageSignatureCache, [][]byte{offchainMessage.Bytes()}) + backend, err := warp.NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, uptime.NoOpCalculator, validators.NoOpState, database, messageSignatureCache, [][]byte{offchainMessage.Bytes()}) require.NoError(t, err) msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, []byte("test")) @@ -139,6 +141,8 @@ func TestBlockSignatureHandler(t *testing.T) { snowCtx.ChainID, warpSigner, blockClient, + uptime.NoOpCalculator, + validators.NoOpState, database, messageSignatureCache, nil, diff --git a/warp/verifier_backend.go b/warp/verifier_backend.go index d5572071db..e6b41106da 100644 --- a/warp/verifier_backend.go +++ b/warp/verifier_backend.go @@ -94,6 +94,8 @@ func (b *backend) verifyAddressedCall(addressedCall *payload.AddressedCall) *com } switch p := parsed.(type) { + case *messages.ValidatorUptime: + return b.verifyUptimeMessage(p) default: return &common.AppError{ Code: ParseErrCode, @@ -101,3 +103,33 @@ func (b *backend) verifyAddressedCall(addressedCall *payload.AddressedCall) *com } } } + +func (b *backend) verifyUptimeMessage(uptimeMsg *messages.ValidatorUptime) *common.AppError { + // first get the validator's nodeID + nodeID, err := b.validatorState.GetNodeID(uptimeMsg.ValidationID) + if err != 
nil { + return &common.AppError{ + Code: VerifyErrCode, + Message: fmt.Sprintf("failed to get nodeID for validationID %s: %s", uptimeMsg.ValidationID, err.Error()), + } + } + + // then get the current uptime + currentUptime, _, err := b.uptimeCalculator.CalculateUptime(nodeID) + if err != nil { + return &common.AppError{ + Code: VerifyErrCode, + Message: fmt.Sprintf("failed to calculate uptime for nodeID %s: %s", nodeID, err.Error()), + } + } + + // verify the current uptime against the total uptime in the message + if uint64(currentUptime.Seconds()) < uptimeMsg.TotalUptime { + return &common.AppError{ + Code: VerifyErrCode, + Message: fmt.Sprintf("current uptime %d is less than total uptime %d for nodeID %s", currentUptime, uptimeMsg.TotalUptime, nodeID), + } + } + + return nil +} diff --git a/warp/verifier_backend_test.go b/warp/verifier_backend_test.go index 79d63ce383..4386592692 100644 --- a/warp/verifier_backend_test.go +++ b/warp/verifier_backend_test.go @@ -14,9 +14,11 @@ import ( "github.com/ava-labs/avalanchego/network/p2p/acp118" "github.com/ava-labs/avalanchego/proto/pb/sdk" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/utils/crypto/bls" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ava-labs/subnet-evm/plugin/evm/validators" "github.com/ava-labs/subnet-evm/utils" "github.com/ava-labs/subnet-evm/warp/warptest" "github.com/stretchr/testify/require" @@ -101,7 +103,7 @@ func TestAddressedCallSignatures(t *testing.T) { } else { sigCache = &cache.Empty[ids.ID, []byte]{} } - warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, database, sigCache, [][]byte{offchainMessage.Bytes()}) + warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, uptime.NoOpCalculator, validators.NoOpState, database, sigCache, [][]byte{offchainMessage.Bytes()}) require.NoError(t, err) handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner) @@ -215,6 +217,8 @@ func TestBlockSignatures(t *testing.T) { snowCtx.ChainID, warpSigner, blockClient, + uptime.NoOpCalculator, + validators.NoOpState, database, sigCache, nil, From 4db9689e89abfe569c7059d07bebcf0d6e144a0c Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 17 Oct 2024 16:31:45 +0300 Subject: [PATCH 53/98] add tests --- warp/messages/validator_uptime.go | 2 +- warp/verifier_backend.go | 7 +-- warp/verifier_backend_test.go | 80 ++++++++++++++++++++++++++++++- 3 files changed, 83 insertions(+), 6 deletions(-) diff --git a/warp/messages/validator_uptime.go b/warp/messages/validator_uptime.go index 3d3e4dd5dd..cd14b39538 100644 --- a/warp/messages/validator_uptime.go +++ b/warp/messages/validator_uptime.go @@ -13,7 +13,7 @@ import ( // has been up for TotalUptime seconds. 
type ValidatorUptime struct { ValidationID ids.ID `serialize:"true"` - TotalUptime uint64 `serialize:"true"` + TotalUptime uint64 `serialize:"true"` // in seconds bytes []byte } diff --git a/warp/verifier_backend.go b/warp/verifier_backend.go index e6b41106da..effe970fee 100644 --- a/warp/verifier_backend.go +++ b/warp/verifier_backend.go @@ -110,7 +110,7 @@ func (b *backend) verifyUptimeMessage(uptimeMsg *messages.ValidatorUptime) *comm if err != nil { return &common.AppError{ Code: VerifyErrCode, - Message: fmt.Sprintf("failed to get nodeID for validationID %s: %s", uptimeMsg.ValidationID, err.Error()), + Message: fmt.Sprintf("failed to get validator for validationID %s: %s", uptimeMsg.ValidationID, err.Error()), } } @@ -123,11 +123,12 @@ func (b *backend) verifyUptimeMessage(uptimeMsg *messages.ValidatorUptime) *comm } } + currentUptimeSeconds := uint64(currentUptime.Seconds()) // verify the current uptime against the total uptime in the message - if uint64(currentUptime.Seconds()) < uptimeMsg.TotalUptime { + if currentUptimeSeconds < uptimeMsg.TotalUptime { return &common.AppError{ Code: VerifyErrCode, - Message: fmt.Sprintf("current uptime %d is less than total uptime %d for nodeID %s", currentUptime, uptimeMsg.TotalUptime, nodeID), + Message: fmt.Sprintf("current uptime %d is less than queried uptime %d for nodeID %s", currentUptimeSeconds, uptimeMsg.TotalUptime, nodeID), } } diff --git a/warp/verifier_backend_test.go b/warp/verifier_backend_test.go index 4386592692..c2327a7804 100644 --- a/warp/verifier_backend_test.go +++ b/warp/verifier_backend_test.go @@ -16,10 +16,12 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/timer/mockable" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/subnet-evm/plugin/evm/validators" "github.com/ava-labs/subnet-evm/utils" + "github.com/ava-labs/subnet-evm/warp/messages" "github.com/ava-labs/subnet-evm/warp/warptest" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" @@ -50,11 +52,11 @@ func TestAddressedCallSignatures(t *testing.T) { require.NoError(t, err) msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, knownPayload.Bytes()) require.NoError(t, err) - offchainSignature, err := warpSigner.Sign(msg) + knownSignature, err := warpSigner.Sign(msg) require.NoError(t, err) backend.AddMessage(msg) - return msg.Bytes(), offchainSignature[:] + return msg.Bytes(), knownSignature[:] }, verifyStats: func(t *testing.T, stats *verifierStats) { require.EqualValues(t, 0, stats.messageParseFail.Snapshot().Count()) @@ -259,3 +261,77 @@ func TestBlockSignatures(t *testing.T) { } } } + +func TestUptimeSignatures(t *testing.T) { + database := memdb.New() + snowCtx := utils.TestSnowContext() + blsSecretKey, err := bls.NewSecretKey() + require.NoError(t, err) + warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) + + getUptimeMessageBytes := func(vID ids.ID, totalUptime uint64) ([]byte, *avalancheWarp.UnsignedMessage) { + uptimePayload, err := messages.NewValidatorUptime(vID, 80) + require.NoError(t, err) + addressedCall, err := 
payload.NewAddressedCall([]byte{1, 2, 3}, uptimePayload.Bytes()) + require.NoError(t, err) + unsignedMessage, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, addressedCall.Bytes()) + require.NoError(t, err) + + protoMsg := &sdk.SignatureRequest{Message: unsignedMessage.Bytes()} + protoBytes, err := proto.Marshal(protoMsg) + require.NoError(t, err) + return protoBytes, unsignedMessage + } + + for _, withCache := range []bool{true, false} { + var sigCache cache.Cacher[ids.ID, []byte] + if withCache { + sigCache = &cache.LRU[ids.ID, []byte]{Size: 100} + } else { + sigCache = &cache.Empty[ids.ID, []byte]{} + } + state, err := validators.NewState(memdb.New()) + require.NoError(t, err) + clk := &mockable.Clock{} + uptimeManager := uptime.NewManager(state, clk) + uptimeManager.StartTracking([]ids.NodeID{}) + warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, uptimeManager, state, database, sigCache, nil) + require.NoError(t, err) + handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner) + + // not existing validationID + vID := ids.GenerateTestID() + protoBytes, _ := getUptimeMessageBytes(vID, 80) + _, appErr := handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, protoBytes) + require.ErrorIs(t, appErr, &common.AppError{Code: VerifyErrCode}) + require.Contains(t, appErr.Error(), "failed to get validator") + + // uptime is less than requested (not connected) + validationID := ids.GenerateTestID() + nodeID := ids.GenerateTestNodeID() + require.NoError(t, state.AddValidator(validationID, nodeID, clk.Unix(), true)) + protoBytes, _ = getUptimeMessageBytes(validationID, 80) + _, appErr = handler.AppRequest(context.Background(), nodeID, time.Time{}, protoBytes) + require.ErrorIs(t, appErr, &common.AppError{Code: VerifyErrCode}) + require.Contains(t, appErr.Error(), "current uptime 0 is less than queried uptime 80") + + // uptime is less than requested (not enough) + require.NoError(t, uptimeManager.Connect(nodeID)) + clk.Set(clk.Time().Add(40 * time.Second)) + protoBytes, _ = getUptimeMessageBytes(validationID, 80) + _, appErr = handler.AppRequest(context.Background(), nodeID, time.Time{}, protoBytes) + require.ErrorIs(t, appErr, &common.AppError{Code: VerifyErrCode}) + require.Contains(t, appErr.Error(), "current uptime 40 is less than queried uptime 80") + + // valid uptime + clk.Set(clk.Time().Add(40 * time.Second)) + protoBytes, msg := getUptimeMessageBytes(validationID, 80) + responseBytes, appErr := handler.AppRequest(context.Background(), nodeID, time.Time{}, protoBytes) + require.Nil(t, appErr) + expectedSignature, err := warpSigner.Sign(msg) + require.NoError(t, err) + response := &sdk.SignatureResponse{} + require.NoError(t, proto.Unmarshal(responseBytes, response)) + require.Equal(t, expectedSignature[:], response.Signature) + } +} From cab1ddf3e74849240974c5247009409fa7cea17b Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 29 Oct 2024 11:27:34 +0300 Subject: [PATCH 54/98] reviews --- plugin/evm/uptime/pausable_manager.go | 5 ++--- plugin/evm/uptime/pausable_manager_test.go | 8 ++++---- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/plugin/evm/uptime/pausable_manager.go b/plugin/evm/uptime/pausable_manager.go index 1505616aeb..18370d41f6 100644 --- a/plugin/evm/uptime/pausable_manager.go +++ b/plugin/evm/uptime/pausable_manager.go @@ -32,8 +32,7 @@ type pausableManager struct { uptime.Manager pausedVdrs set.Set[ids.NodeID] // connectedVdrs is a set 
of nodes that are connected to the manager. - // This is used to keep track of the nodes that are connected to the manager - // but are paused. + // This is used to immediately connect nodes when they are unpaused. connectedVdrs set.Set[ids.NodeID] } @@ -150,7 +149,7 @@ func (p *pausableManager) resume(nodeID ids.NodeID) error { return errNotPaused } p.pausedVdrs.Remove(nodeID) - if p.connectedVdrs.Contains(nodeID) { + if p.connectedVdrs.Contains(nodeID) && !p.Manager.IsConnected(nodeID) { return p.Manager.Connect(nodeID) } return nil diff --git a/plugin/evm/uptime/pausable_manager_test.go b/plugin/evm/uptime/pausable_manager_test.go index 3d6f996134..03a6228d22 100644 --- a/plugin/evm/uptime/pausable_manager_test.go +++ b/plugin/evm/uptime/pausable_manager_test.go @@ -19,7 +19,7 @@ func TestPausableManager(t *testing.T) { startTime := time.Now() require := require.New(t) - // Connect before pause before tracking + // Case 1: Connect, pause, start tracking { up, clk, _ := setupTestEnv(nodeID0, startTime) @@ -45,7 +45,7 @@ func TestPausableManager(t *testing.T) { checkUptime(t, up, nodeID0, 0*time.Second, currentTime) } - // Paused after tracking resumed after tracking + // Case 2: Start tracking, connect, pause, re-connect, resume { up, clk, _ := setupTestEnv(nodeID0, startTime) @@ -91,7 +91,7 @@ func TestPausableManager(t *testing.T) { checkUptime(t, up, nodeID0, 8*time.Second, currentTime) } - // Paused before tracking resumed after tracking + // Case 3: Pause, start tracking, connect, re-connect, resume { up, clk, _ := setupTestEnv(nodeID0, startTime) @@ -128,7 +128,7 @@ func TestPausableManager(t *testing.T) { checkUptime(t, up, nodeID0, 6*time.Second, currentTime) } - // Paused after tracking resumed before tracking + // Case 4: Start tracking, connect, pause, stop tracking, resume tracking. { up, clk, s := setupTestEnv(nodeID0, startTime) From 5f8bf5b919c5a4876e49be211bb87dc0fae82bf0 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 29 Oct 2024 11:45:29 +0300 Subject: [PATCH 55/98] conflict fix --- plugin/evm/vm.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 17655e1ac8..630ef39b9b 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -208,6 +208,9 @@ type VM struct { // [db] is the VM's current database managed by ChainState db *versiondb.Database + // metadataDB is used to store one off keys. 
+ metadataDB database.Database + // [chaindb] is the database supplied to the Ethereum backend chaindb ethdb.Database @@ -491,7 +494,6 @@ func (vm *VM) Initialize( if err != nil { return fmt.Errorf("failed to initialize p2p network: %w", err) } - // TODO: consider using p2p validators for Subnet-EVM's validatorState vm.validators = p2p.NewValidators(p2pNetwork.Peers, vm.ctx.Log, vm.ctx.SubnetID, vm.ctx.ValidatorState, maxValidatorSetStaleness) vm.networkCodec = message.Codec vm.Network = peer.NewNetwork(p2pNetwork, appSender, vm.networkCodec, chainCtx.NodeID, vm.config.MaxOutboundActiveRequests) @@ -609,7 +611,6 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { } } - metadataDB := prefixdb.New(metadataPrefix, vm.db) vm.StateSyncClient = NewStateSyncClient(&stateSyncClientConfig{ chain: vm.eth, state: vm.State, @@ -628,7 +629,7 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { stateSyncRequestSize: vm.config.StateSyncRequestSize, lastAcceptedHeight: lastAcceptedHeight, // TODO clean up how this is passed around chaindb: vm.chaindb, - metadataDB: metadataDB, + metadataDB: vm.metadataDB, acceptedBlockDB: vm.acceptedBlockDB, db: vm.db, toEngine: vm.toEngine, From 0ba2869fc64ea6733500f45be8a03a86ee65a677 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 29 Oct 2024 12:17:01 +0300 Subject: [PATCH 56/98] custom err msg --- plugin/evm/vm.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 630ef39b9b..029ae574af 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -1408,7 +1408,7 @@ func (vm *VM) createDatabase(dbConfig avalancheNode.DatabaseConfig) (database.Da func (vm *VM) Connected(ctx context.Context, nodeID ids.NodeID, version *version.Application) error { if err := vm.uptimeManager.Connect(nodeID); err != nil { - return err + return fmt.Errorf("uptime manager failed to connect node %s: %w", nodeID, err) } return vm.Network.Connected(ctx, nodeID, version) } From 374d885c5a9c91311213181025185092722a63ee Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 29 Oct 2024 12:56:28 +0300 Subject: [PATCH 57/98] add listener mock --- plugin/evm/validators/mock_listener.go | 76 ++++++++++++++++++++++++++ plugin/evm/validators/state_test.go | 70 ++++++------------------ scripts/mocks.mockgen.txt | 1 + 3 files changed, 94 insertions(+), 53 deletions(-) create mode 100644 plugin/evm/validators/mock_listener.go diff --git a/plugin/evm/validators/mock_listener.go b/plugin/evm/validators/mock_listener.go new file mode 100644 index 0000000000..d67703007d --- /dev/null +++ b/plugin/evm/validators/mock_listener.go @@ -0,0 +1,76 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/subnet-evm/plugin/evm/validators (interfaces: StateCallbackListener) +// +// Generated by this command: +// +// mockgen -package=validators -destination=plugin/evm/validators/mock_listener.go github.com/ava-labs/subnet-evm/plugin/evm/validators StateCallbackListener +// + +// Package validators is a generated GoMock package. +package validators + +import ( + reflect "reflect" + + ids "github.com/ava-labs/avalanchego/ids" + gomock "go.uber.org/mock/gomock" +) + +// MockStateCallbackListener is a mock of StateCallbackListener interface. 
+type MockStateCallbackListener struct { + ctrl *gomock.Controller + recorder *MockStateCallbackListenerMockRecorder +} + +// MockStateCallbackListenerMockRecorder is the mock recorder for MockStateCallbackListener. +type MockStateCallbackListenerMockRecorder struct { + mock *MockStateCallbackListener +} + +// NewMockStateCallbackListener creates a new mock instance. +func NewMockStateCallbackListener(ctrl *gomock.Controller) *MockStateCallbackListener { + mock := &MockStateCallbackListener{ctrl: ctrl} + mock.recorder = &MockStateCallbackListenerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStateCallbackListener) EXPECT() *MockStateCallbackListenerMockRecorder { + return m.recorder +} + +// OnValidatorAdded mocks base method. +func (m *MockStateCallbackListener) OnValidatorAdded(arg0 ids.ID, arg1 ids.NodeID, arg2 uint64, arg3 bool) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "OnValidatorAdded", arg0, arg1, arg2, arg3) +} + +// OnValidatorAdded indicates an expected call of OnValidatorAdded. +func (mr *MockStateCallbackListenerMockRecorder) OnValidatorAdded(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnValidatorAdded", reflect.TypeOf((*MockStateCallbackListener)(nil).OnValidatorAdded), arg0, arg1, arg2, arg3) +} + +// OnValidatorRemoved mocks base method. +func (m *MockStateCallbackListener) OnValidatorRemoved(arg0 ids.ID, arg1 ids.NodeID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "OnValidatorRemoved", arg0, arg1) +} + +// OnValidatorRemoved indicates an expected call of OnValidatorRemoved. +func (mr *MockStateCallbackListenerMockRecorder) OnValidatorRemoved(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnValidatorRemoved", reflect.TypeOf((*MockStateCallbackListener)(nil).OnValidatorRemoved), arg0, arg1) +} + +// OnValidatorStatusUpdated mocks base method. +func (m *MockStateCallbackListener) OnValidatorStatusUpdated(arg0 ids.ID, arg1 ids.NodeID, arg2 bool) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "OnValidatorStatusUpdated", arg0, arg1, arg2) +} + +// OnValidatorStatusUpdated indicates an expected call of OnValidatorStatusUpdated. 
+func (mr *MockStateCallbackListenerMockRecorder) OnValidatorStatusUpdated(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnValidatorStatusUpdated", reflect.TypeOf((*MockStateCallbackListener)(nil).OnValidatorStatusUpdated), arg0, arg1, arg2) +} diff --git a/plugin/evm/validators/state_test.go b/plugin/evm/validators/state_test.go index 2babe1d6ab..ecfd7d34a9 100644 --- a/plugin/evm/validators/state_test.go +++ b/plugin/evm/validators/state_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database" @@ -216,71 +217,34 @@ func TestStateListener(t *testing.T) { db := memdb.New() state, err := NewState(db) require.NoError(err) + ctrl := gomock.NewController(t) + defer ctrl.Finish() expectedvID := ids.GenerateTestID() expectedNodeID := ids.GenerateTestNodeID() expectedStartTime := time.Now() + mockListener := NewMockStateCallbackListener(ctrl) + // add initial validator to test RegisterListener + initialvID := ids.GenerateTestID() + initialNodeID := ids.GenerateTestNodeID() + initialStartTime := time.Now() - // add listener - listener := &testCallbackListener{ - t: t, - onAdd: func(vID ids.ID, nodeID ids.NodeID, startTime uint64, isActive bool) { - require.Equal(expectedvID, vID) - require.Equal(expectedNodeID, nodeID) - require.Equal(uint64(expectedStartTime.Unix()), startTime) - require.True(isActive) - }, - onRemove: func(vID ids.ID, nodeID ids.NodeID) { - require.Equal(expectedvID, vID) - require.Equal(expectedNodeID, nodeID) - }, - onStatusUpdate: func(vID ids.ID, nodeID ids.NodeID, isActive bool) { - require.Equal(expectedvID, vID) - require.Equal(expectedNodeID, nodeID) - require.False(isActive) - }, - } - state.RegisterListener(listener) + // add initial validator + require.NoError(state.AddValidator(initialvID, initialNodeID, uint64(initialStartTime.Unix()), true)) + + // register listener + mockListener.EXPECT().OnValidatorAdded(initialvID, initialNodeID, uint64(initialStartTime.Unix()), true) + state.RegisterListener(mockListener) // add new validator + mockListener.EXPECT().OnValidatorAdded(expectedvID, expectedNodeID, uint64(expectedStartTime.Unix()), true) require.NoError(state.AddValidator(expectedvID, expectedNodeID, uint64(expectedStartTime.Unix()), true)) // set status + mockListener.EXPECT().OnValidatorStatusUpdated(expectedvID, expectedNodeID, false) require.NoError(state.SetStatus(expectedvID, false)) // remove validator + mockListener.EXPECT().OnValidatorRemoved(expectedvID, expectedNodeID) require.NoError(state.DeleteValidator(expectedvID)) } - -var _ StateCallbackListener = (*testCallbackListener)(nil) - -type testCallbackListener struct { - t *testing.T - onAdd func(vID ids.ID, nodeID ids.NodeID, startTime uint64, isActive bool) - onRemove func(ids.ID, ids.NodeID) - onStatusUpdate func(ids.ID, ids.NodeID, bool) -} - -func (t *testCallbackListener) OnValidatorAdded(vID ids.ID, nodeID ids.NodeID, startTime uint64, isActive bool) { - if t.onAdd != nil { - t.onAdd(vID, nodeID, startTime, isActive) - } else { - t.t.Fail() - } -} - -func (t *testCallbackListener) OnValidatorRemoved(vID ids.ID, nodeID ids.NodeID) { - if t.onRemove != nil { - t.onRemove(vID, nodeID) - } else { - t.t.Fail() - } -} - -func (t *testCallbackListener) OnValidatorStatusUpdated(vID ids.ID, nodeID ids.NodeID, isActive bool) { - if t.onStatusUpdate != 
nil { - t.onStatusUpdate(vID, nodeID, isActive) - } else { - t.t.Fail() - } -} diff --git a/scripts/mocks.mockgen.txt b/scripts/mocks.mockgen.txt index 391dc8e13c..73aa62ccd7 100644 --- a/scripts/mocks.mockgen.txt +++ b/scripts/mocks.mockgen.txt @@ -1,2 +1,3 @@ github.com/ava-labs/subnet-evm/precompile/precompileconfig=Predicater,Config,ChainConfig,Accepter=precompile/precompileconfig/mocks.go github.com/ava-labs/subnet-evm/precompile/contract=BlockContext,AccessibleState,StateDB=precompile/contract/mocks.go +github.com/ava-labs/subnet-evm/plugin/evm/validators=StateCallbackListener=plugin/evm/validators/mock_listener.go From 9e707ad044f19024e7f3b36612265db9f861f9d9 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 29 Oct 2024 17:15:17 +0300 Subject: [PATCH 58/98] bump avago test branch --- go.mod | 2 +- go.sum | 2 ++ plugin/evm/vm.go | 2 +- plugin/evm/vm_validators_state_test.go | 8 ++++---- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 8a074d32ca..eb7519162f 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22.8 require ( github.com/VictoriaMetrics/fastcache v1.12.1 github.com/antithesishq/antithesis-sdk-go v0.3.8 - github.com/ava-labs/avalanchego v1.11.13-0.20241026214739-acb3d7d102a0 + github.com/ava-labs/avalanchego v1.11.13-0.20241029131257-e05aa37abaf9 github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index dd6bf4e4f6..c5d14ece7a 100644 --- a/go.sum +++ b/go.sum @@ -60,6 +60,8 @@ github.com/antithesishq/antithesis-sdk-go v0.3.8/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/ava-labs/avalanchego v1.11.13-0.20241026214739-acb3d7d102a0 h1:1T9OnvZP6XZ62EVWlfmrI8rrudyE6bM2Zt51pCHfS5o= github.com/ava-labs/avalanchego v1.11.13-0.20241026214739-acb3d7d102a0/go.mod h1:gYlTU42Q4b29hzhUN22yclym5qwB3Si0jh4+LTn7DZM= +github.com/ava-labs/avalanchego v1.11.13-0.20241029131257-e05aa37abaf9 h1:rX5Xn5WBPppyvvZXp8wwx9dX+4fBY2/XeNIEm18ifzk= +github.com/ava-labs/avalanchego v1.11.13-0.20241029131257-e05aa37abaf9/go.mod h1:gYlTU42Q4b29hzhUN22yclym5qwB3Si0jh4+LTn7DZM= github.com/ava-labs/coreth v0.13.8 h1:f14X3KgwHl9LwzfxlN6S4bbn5VA2rhEsNnHaRLSTo/8= github.com/ava-labs/coreth v0.13.8/go.mod h1:t3BSv/eQv0AlDPMfEDCMMoD/jq1RkUsbFzQAFg5qBcE= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index a6c3f12213..983336e4d4 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -1447,7 +1447,7 @@ func (vm *VM) performValidatorUpdate(ctx context.Context) error { now := time.Now() log.Debug("performing validator update") // get current validator set - currentValidatorSet, _, err := vm.ctx.ValidatorState.GetCurrentValidatorSet(ctx, vm.ctx.SubnetID) + currentValidatorSet, _, _, err := vm.ctx.ValidatorState.GetCurrentValidatorSet(ctx, vm.ctx.SubnetID) if err != nil { return fmt.Errorf("failed to get current validator set: %w", err) } diff --git a/plugin/evm/vm_validators_state_test.go 
b/plugin/evm/vm_validators_state_test.go index 5a402441b8..4747fb0fbf 100644 --- a/plugin/evm/vm_validators_state_test.go +++ b/plugin/evm/vm_validators_state_test.go @@ -43,7 +43,7 @@ func TestValidatorState(t *testing.T) { ids.GenerateTestID(), } ctx.ValidatorState = &validatorstest.State{ - GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagoValidators.GetCurrentValidatorOutput, uint64, error) { + GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagoValidators.GetCurrentValidatorOutput, uint64, bool, error) { return map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ testValidationIDs[0]: { NodeID: testNodeIDs[0], @@ -60,7 +60,7 @@ func TestValidatorState(t *testing.T) { PublicKey: nil, Weight: 1, }, - }, 0, nil + }, 0, false, nil }, } appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } @@ -120,7 +120,7 @@ func TestValidatorState(t *testing.T) { newValidationID := ids.GenerateTestID() newNodeID := ids.GenerateTestNodeID() testState := &validatorstest.State{ - GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagoValidators.GetCurrentValidatorOutput, uint64, error) { + GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagoValidators.GetCurrentValidatorOutput, uint64, bool, error) { return map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ testValidationIDs[0]: { NodeID: testNodeIDs[0], @@ -142,7 +142,7 @@ func TestValidatorState(t *testing.T) { PublicKey: nil, Weight: 1, }, - }, 0, nil + }, 0, false, nil }, } vm.ctx.ValidatorState = testState From cc732668fe77b616ba685f3c72a11d2b079cf17d Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 29 Oct 2024 17:46:46 +0300 Subject: [PATCH 59/98] remove config --- plugin/evm/config.go | 6 ------ plugin/evm/service.go | 1 - plugin/evm/vm.go | 7 +++++-- scripts/versions.sh | 2 +- 4 files changed, 6 insertions(+), 10 deletions(-) diff --git a/plugin/evm/config.go b/plugin/evm/config.go index 9750e4af3d..41aa8e418c 100644 --- a/plugin/evm/config.go +++ b/plugin/evm/config.go @@ -63,8 +63,6 @@ const ( defaultStateSyncRequestSize = 1024 // the number of key/values to ask peers for per request defaultValidatorsAPIEnabled = true - // TODO: decide for a sane value for this - defaultLoadValidatorsFrequency = 5 * time.Minute defaultDBType = pebbledb.Name ) @@ -236,9 +234,6 @@ type Config struct { // RPC settings HttpBodyLimit uint64 `json:"http-body-limit"` - // LoadValidatorsFrequency is the frequency at which the node should load the validators - LoadValidatorsFrequency time.Duration `json:"load-validators-frequency"` - // Database settings UseStandaloneDatabase *PBool `json:"use-standalone-database"` DatabaseConfigContent string `json:"database-config"` @@ -306,7 +301,6 @@ func (c *Config) SetDefaults() { c.AllowUnprotectedTxHashes = defaultAllowUnprotectedTxHashes c.AcceptedCacheSize = defaultAcceptedCacheSize c.ValidatorsAPIEnabled = defaultValidatorsAPIEnabled - c.LoadValidatorsFrequency = defaultLoadValidatorsFrequency c.DatabaseType = defaultDBType } diff --git a/plugin/evm/service.go b/plugin/evm/service.go index a7b0339599..52ffb11329 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -33,7 +33,6 @@ type CurrentValidator struct { Uptime time.Duration `json:"uptime"` } -// GetUptime returns the uptime of the node func (api *ValidatorsAPI) GetCurrentValidators(_ *http.Request, args *GetCurrentValidatorsRequest, reply *GetCurrentValidatorsResponse) 
error { api.vm.ctx.Lock.RLock() defer api.vm.ctx.Lock.RUnlock() diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 983336e4d4..432277de43 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -134,6 +134,8 @@ const ( txGossipThrottlingPeriod = 10 * time.Second txGossipThrottlingLimit = 2 txGossipPollSize = 1 + + loadValidatorsFrequency = 1 * time.Minute ) // Define the API endpoints for the VM @@ -506,7 +508,7 @@ func (vm *VM) Initialize( if err != nil { return fmt.Errorf("failed to initialize validator state: %w", err) } - // TODO: add a configuration to disable tracking uptime + vm.uptimeManager = uptime.NewPausableManager(avalancheUptime.NewManager(vm.validatorState, &vm.clock)) vm.validatorState.RegisterListener(vm.uptimeManager) @@ -1424,7 +1426,7 @@ func (vm *VM) Disconnected(ctx context.Context, nodeID ids.NodeID) error { } func (vm *VM) dispatchUpdateValidators(ctx context.Context) { - ticker := time.NewTicker(vm.config.LoadValidatorsFrequency) + ticker := time.NewTicker(loadValidatorsFrequency) defer ticker.Stop() for { @@ -1464,6 +1466,7 @@ func (vm *VM) performValidatorUpdate(ctx context.Context) error { return fmt.Errorf("failed to write validator state: %w", err) } + // TODO: add metrics log.Debug("validator update complete", "duration", time.Since(now)) return nil } diff --git a/scripts/versions.sh b/scripts/versions.sh index 739e0d2b75..fa04254c77 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -4,7 +4,7 @@ # shellcheck disable=SC2034 # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'07af6b2f'} +AVALANCHE_VERSION=${AVALANCHE_VERSION:-'e05aa37a'} GINKGO_VERSION=${GINKGO_VERSION:-'v2.2.0'} # This won't be used, but it's here to make code syncs easier From 8ef763fca65a7144ac35b69889873728d3485558 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 29 Oct 2024 17:51:37 +0300 Subject: [PATCH 60/98] remove api changes --- plugin/evm/api.go | 38 --------------------- plugin/evm/config.go | 15 +++----- plugin/evm/service.go | 79 ++++++++++++------------------------------- plugin/evm/vm.go | 11 ------ 4 files changed, 27 insertions(+), 116 deletions(-) delete mode 100644 plugin/evm/api.go diff --git a/plugin/evm/api.go b/plugin/evm/api.go deleted file mode 100644 index a8fe61cbc0..0000000000 --- a/plugin/evm/api.go +++ /dev/null @@ -1,38 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package evm - -import ( - "context" - "math/big" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" -) - -// SnowmanAPI introduces snowman specific functionality to the evm -type SnowmanAPI struct{ vm *VM } - -// GetAcceptedFrontReply defines the reply that will be sent from the -// GetAcceptedFront API call -type GetAcceptedFrontReply struct { - Hash common.Hash `json:"hash"` - Number *big.Int `json:"number"` -} - -// GetAcceptedFront returns the last accepted block's hash and height -func (api *SnowmanAPI) GetAcceptedFront(ctx context.Context) (*GetAcceptedFrontReply, error) { - blk := api.vm.blockChain.LastConsensusAcceptedBlock() - return &GetAcceptedFrontReply{ - Hash: blk.Hash(), - Number: blk.Number(), - }, nil -} - -// IssueBlock to the chain -func (api *SnowmanAPI) IssueBlock(ctx context.Context) error { - log.Info("Issuing a new block") - api.vm.builder.signalTxsReady() - return nil -} diff --git a/plugin/evm/config.go b/plugin/evm/config.go index 41aa8e418c..d4a4b44af2 100644 --- a/plugin/evm/config.go +++ b/plugin/evm/config.go @@ -61,10 +61,7 @@ const ( // - state sync time: ~6 hrs. defaultStateSyncMinBlocks = 300_000 defaultStateSyncRequestSize = 1024 // the number of key/values to ask peers for per request - - defaultValidatorsAPIEnabled = true - - defaultDBType = pebbledb.Name + defaultDBType = pebbledb.Name ) type PBool bool @@ -94,11 +91,10 @@ type Config struct { AirdropFile string `json:"airdrop"` // Subnet EVM APIs - SnowmanAPIEnabled bool `json:"snowman-api-enabled"` - ValidatorsAPIEnabled bool `json:"validators-api-enabled"` - AdminAPIEnabled bool `json:"admin-api-enabled"` - AdminAPIDir string `json:"admin-api-dir"` - WarpAPIEnabled bool `json:"warp-api-enabled"` + SnowmanAPIEnabled bool `json:"snowman-api-enabled"` + AdminAPIEnabled bool `json:"admin-api-enabled"` + AdminAPIDir string `json:"admin-api-dir"` + WarpAPIEnabled bool `json:"warp-api-enabled"` // EnabledEthAPIs is a list of Ethereum services that should be enabled // If none is specified, then we use the default list [defaultEnabledAPIs] @@ -300,7 +296,6 @@ func (c *Config) SetDefaults() { c.StateSyncRequestSize = defaultStateSyncRequestSize c.AllowUnprotectedTxHashes = defaultAllowUnprotectedTxHashes c.AcceptedCacheSize = defaultAcceptedCacheSize - c.ValidatorsAPIEnabled = defaultValidatorsAPIEnabled c.DatabaseType = defaultDBType } diff --git a/plugin/evm/service.go b/plugin/evm/service.go index 52ffb11329..a8fe61cbc0 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -4,70 +4,35 @@ package evm import ( - "net/http" - "time" + "context" + "math/big" - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" ) -type ValidatorsAPI struct { - vm *VM -} - -type GetCurrentValidatorsRequest struct { - NodeIDs []ids.NodeID `json:"nodeIDs"` -} +// SnowmanAPI introduces snowman specific functionality to the evm +type SnowmanAPI struct{ vm *VM } -type GetCurrentValidatorsResponse struct { - Validators []CurrentValidator `json:"validators"` +// GetAcceptedFrontReply defines the reply that will be sent from the +// GetAcceptedFront API call +type GetAcceptedFrontReply struct { + Hash common.Hash `json:"hash"` + Number *big.Int `json:"number"` } -type CurrentValidator struct { - ValidationID 
ids.ID `json:"validationID"` - NodeID ids.NodeID `json:"nodeID"` - StartTime time.Time `json:"startTime"` - IsActive bool `json:"isActive"` - IsConnected bool `json:"isConnected"` - Uptime time.Duration `json:"uptime"` +// GetAcceptedFront returns the last accepted block's hash and height +func (api *SnowmanAPI) GetAcceptedFront(ctx context.Context) (*GetAcceptedFrontReply, error) { + blk := api.vm.blockChain.LastConsensusAcceptedBlock() + return &GetAcceptedFrontReply{ + Hash: blk.Hash(), + Number: blk.Number(), + }, nil } -func (api *ValidatorsAPI) GetCurrentValidators(_ *http.Request, args *GetCurrentValidatorsRequest, reply *GetCurrentValidatorsResponse) error { - api.vm.ctx.Lock.RLock() - defer api.vm.ctx.Lock.RUnlock() - - nodeIDs := set.Of(args.NodeIDs...) - if nodeIDs.Len() == 0 { - nodeIDs = api.vm.validatorState.GetValidatorIDs() - } - - reply.Validators = make([]CurrentValidator, 0, nodeIDs.Len()) - - for _, nodeID := range nodeIDs.List() { - validator, err := api.vm.validatorState.GetValidator(nodeID) - switch { - case err == database.ErrNotFound: - continue - case err != nil: - return err - } - - isConnected := api.vm.uptimeManager.IsConnected(nodeID) - - uptime, _, err := api.vm.uptimeManager.CalculateUptime(nodeID) - if err != nil { - return err - } - - reply.Validators = append(reply.Validators, CurrentValidator{ - ValidationID: validator.ValidationID, - NodeID: nodeID, - StartTime: validator.StartTime, - IsActive: validator.IsActive, - IsConnected: isConnected, - Uptime: time.Duration(uptime.Seconds()), - }) - } +// IssueBlock to the chain +func (api *SnowmanAPI) IssueBlock(ctx context.Context) error { + log.Info("Issuing a new block") + api.vm.builder.signalTxsReady() return nil } diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 432277de43..fdaa06a553 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -143,7 +143,6 @@ const ( adminEndpoint = "/admin" ethRPCEndpoint = "/rpc" ethWSEndpoint = "/ws" - validatorsEndpoint = "/validators" ethTxGossipNamespace = "eth_tx_gossip" ) @@ -1082,16 +1081,6 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { enabledAPIs = append(enabledAPIs, "subnet-evm-admin") } - if vm.config.ValidatorsAPIEnabled { - validatorsAPI, err := newHandler("validators", &ValidatorsAPI{vm}) - if err != nil { - return nil, fmt.Errorf("failed to register service for admin API due to %w", err) - } - apis[validatorsEndpoint] = validatorsAPI - enabledAPIs = append(enabledAPIs, "validators") - } - - // RPC APIs if vm.config.SnowmanAPIEnabled { if err := handler.RegisterName("snowman", &SnowmanAPI{vm}); err != nil { return nil, err From 2a9da82e5df765ae932fd3239472d502f2ea4bfd Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 29 Oct 2024 17:51:53 +0300 Subject: [PATCH 61/98] Revert "remove api changes" This reverts commit 8ef763fca65a7144ac35b69889873728d3485558. --- plugin/evm/api.go | 38 +++++++++++++++++++++ plugin/evm/config.go | 15 +++++--- plugin/evm/service.go | 79 +++++++++++++++++++++++++++++++------------ plugin/evm/vm.go | 11 ++++++ 4 files changed, 116 insertions(+), 27 deletions(-) create mode 100644 plugin/evm/api.go diff --git a/plugin/evm/api.go b/plugin/evm/api.go new file mode 100644 index 0000000000..a8fe61cbc0 --- /dev/null +++ b/plugin/evm/api.go @@ -0,0 +1,38 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package evm + +import ( + "context" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +// SnowmanAPI introduces snowman specific functionality to the evm +type SnowmanAPI struct{ vm *VM } + +// GetAcceptedFrontReply defines the reply that will be sent from the +// GetAcceptedFront API call +type GetAcceptedFrontReply struct { + Hash common.Hash `json:"hash"` + Number *big.Int `json:"number"` +} + +// GetAcceptedFront returns the last accepted block's hash and height +func (api *SnowmanAPI) GetAcceptedFront(ctx context.Context) (*GetAcceptedFrontReply, error) { + blk := api.vm.blockChain.LastConsensusAcceptedBlock() + return &GetAcceptedFrontReply{ + Hash: blk.Hash(), + Number: blk.Number(), + }, nil +} + +// IssueBlock to the chain +func (api *SnowmanAPI) IssueBlock(ctx context.Context) error { + log.Info("Issuing a new block") + api.vm.builder.signalTxsReady() + return nil +} diff --git a/plugin/evm/config.go b/plugin/evm/config.go index d4a4b44af2..41aa8e418c 100644 --- a/plugin/evm/config.go +++ b/plugin/evm/config.go @@ -61,7 +61,10 @@ const ( // - state sync time: ~6 hrs. defaultStateSyncMinBlocks = 300_000 defaultStateSyncRequestSize = 1024 // the number of key/values to ask peers for per request - defaultDBType = pebbledb.Name + + defaultValidatorsAPIEnabled = true + + defaultDBType = pebbledb.Name ) type PBool bool @@ -91,10 +94,11 @@ type Config struct { AirdropFile string `json:"airdrop"` // Subnet EVM APIs - SnowmanAPIEnabled bool `json:"snowman-api-enabled"` - AdminAPIEnabled bool `json:"admin-api-enabled"` - AdminAPIDir string `json:"admin-api-dir"` - WarpAPIEnabled bool `json:"warp-api-enabled"` + SnowmanAPIEnabled bool `json:"snowman-api-enabled"` + ValidatorsAPIEnabled bool `json:"validators-api-enabled"` + AdminAPIEnabled bool `json:"admin-api-enabled"` + AdminAPIDir string `json:"admin-api-dir"` + WarpAPIEnabled bool `json:"warp-api-enabled"` // EnabledEthAPIs is a list of Ethereum services that should be enabled // If none is specified, then we use the default list [defaultEnabledAPIs] @@ -296,6 +300,7 @@ func (c *Config) SetDefaults() { c.StateSyncRequestSize = defaultStateSyncRequestSize c.AllowUnprotectedTxHashes = defaultAllowUnprotectedTxHashes c.AcceptedCacheSize = defaultAcceptedCacheSize + c.ValidatorsAPIEnabled = defaultValidatorsAPIEnabled c.DatabaseType = defaultDBType } diff --git a/plugin/evm/service.go b/plugin/evm/service.go index a8fe61cbc0..52ffb11329 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -4,35 +4,70 @@ package evm import ( - "context" - "math/big" + "net/http" + "time" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" ) -// SnowmanAPI introduces snowman specific functionality to the evm -type SnowmanAPI struct{ vm *VM } +type ValidatorsAPI struct { + vm *VM +} + +type GetCurrentValidatorsRequest struct { + NodeIDs []ids.NodeID `json:"nodeIDs"` +} -// GetAcceptedFrontReply defines the reply that will be sent from the -// GetAcceptedFront API call -type GetAcceptedFrontReply struct { - Hash common.Hash `json:"hash"` - Number *big.Int `json:"number"` +type GetCurrentValidatorsResponse struct { + Validators []CurrentValidator `json:"validators"` } -// GetAcceptedFront returns the last accepted 
block's hash and height -func (api *SnowmanAPI) GetAcceptedFront(ctx context.Context) (*GetAcceptedFrontReply, error) { - blk := api.vm.blockChain.LastConsensusAcceptedBlock() - return &GetAcceptedFrontReply{ - Hash: blk.Hash(), - Number: blk.Number(), - }, nil +type CurrentValidator struct { + ValidationID ids.ID `json:"validationID"` + NodeID ids.NodeID `json:"nodeID"` + StartTime time.Time `json:"startTime"` + IsActive bool `json:"isActive"` + IsConnected bool `json:"isConnected"` + Uptime time.Duration `json:"uptime"` } -// IssueBlock to the chain -func (api *SnowmanAPI) IssueBlock(ctx context.Context) error { - log.Info("Issuing a new block") - api.vm.builder.signalTxsReady() +func (api *ValidatorsAPI) GetCurrentValidators(_ *http.Request, args *GetCurrentValidatorsRequest, reply *GetCurrentValidatorsResponse) error { + api.vm.ctx.Lock.RLock() + defer api.vm.ctx.Lock.RUnlock() + + nodeIDs := set.Of(args.NodeIDs...) + if nodeIDs.Len() == 0 { + nodeIDs = api.vm.validatorState.GetValidatorIDs() + } + + reply.Validators = make([]CurrentValidator, 0, nodeIDs.Len()) + + for _, nodeID := range nodeIDs.List() { + validator, err := api.vm.validatorState.GetValidator(nodeID) + switch { + case err == database.ErrNotFound: + continue + case err != nil: + return err + } + + isConnected := api.vm.uptimeManager.IsConnected(nodeID) + + uptime, _, err := api.vm.uptimeManager.CalculateUptime(nodeID) + if err != nil { + return err + } + + reply.Validators = append(reply.Validators, CurrentValidator{ + ValidationID: validator.ValidationID, + NodeID: nodeID, + StartTime: validator.StartTime, + IsActive: validator.IsActive, + IsConnected: isConnected, + Uptime: time.Duration(uptime.Seconds()), + }) + } return nil } diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index fdaa06a553..432277de43 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -143,6 +143,7 @@ const ( adminEndpoint = "/admin" ethRPCEndpoint = "/rpc" ethWSEndpoint = "/ws" + validatorsEndpoint = "/validators" ethTxGossipNamespace = "eth_tx_gossip" ) @@ -1081,6 +1082,16 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { enabledAPIs = append(enabledAPIs, "subnet-evm-admin") } + if vm.config.ValidatorsAPIEnabled { + validatorsAPI, err := newHandler("validators", &ValidatorsAPI{vm}) + if err != nil { + return nil, fmt.Errorf("failed to register service for admin API due to %w", err) + } + apis[validatorsEndpoint] = validatorsAPI + enabledAPIs = append(enabledAPIs, "validators") + } + + // RPC APIs if vm.config.SnowmanAPIEnabled { if err := handler.RegisterName("snowman", &SnowmanAPI{vm}); err != nil { return nil, err From 46c3316f5ca2c123dd729d24632afb95cd9a3aa4 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 29 Oct 2024 18:34:05 +0300 Subject: [PATCH 62/98] remove wrapped cache --- warp/wrapped_cache.go | 52 ------------------------------------------- 1 file changed, 52 deletions(-) delete mode 100644 warp/wrapped_cache.go diff --git a/warp/wrapped_cache.go b/warp/wrapped_cache.go deleted file mode 100644 index 8d70ac50b2..0000000000 --- a/warp/wrapped_cache.go +++ /dev/null @@ -1,52 +0,0 @@ -// (c) 2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package warp - -import ( - "github.com/ava-labs/avalanchego/cache" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/proto/pb/sdk" - "github.com/ethereum/go-ethereum/log" - "google.golang.org/protobuf/proto" -) - -type wrappedCache struct { - cache.Cacher[ids.ID, []byte] -} - -// NewWrappedCache takes a SDK cache that caches SignatureResponses and wraps it -// to return the Signature from the SignatureResponse. -func NewWrappedCache(sdkCache cache.Cacher[ids.ID, []byte]) cache.Cacher[ids.ID, []byte] { - return &wrappedCache{ - Cacher: sdkCache, - } -} - -func (w *wrappedCache) Get(key ids.ID) ([]byte, bool) { - responseBytes, ok := w.Cacher.Get(key) - if !ok { - return responseBytes, false - } - response := sdk.SignatureResponse{} - err := proto.Unmarshal(responseBytes, &response) - if err != nil { - log.Error("failed to unmarshal cached SignatureResponse", "error", err) - return nil, false - } - - return response.Signature, true -} - -func (w *wrappedCache) Put(key ids.ID, value []byte) { - response := sdk.SignatureResponse{ - Signature: value, - } - responseBytes, err := proto.Marshal(&response) - if err != nil { - log.Error("failed to marshal SignatureResponse", "error", err) - return - } - - w.Cacher.Put(key, responseBytes) -} From 9a50ee86d05938214bf39c549534ae857540b23f Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 29 Oct 2024 19:21:49 +0300 Subject: [PATCH 63/98] use non-version db for validatorsDB --- plugin/evm/vm.go | 6 ++++-- plugin/evm/vm_validators_state_test.go | 7 ++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index fdaa06a553..bcfb7bbb33 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -224,6 +224,8 @@ type VM struct { // set to a prefixDB with the prefix [warpPrefix] warpDB database.Database + validatorsDB database.Database + toEngine chan<- commonEng.Message syntacticBlockValidator BlockValidator @@ -502,8 +504,7 @@ func (vm *VM) Initialize( vm.Network = peer.NewNetwork(p2pNetwork, appSender, vm.networkCodec, chainCtx.NodeID, vm.config.MaxOutboundActiveRequests) vm.client = peer.NewNetworkClient(vm.Network) - validatorsDB := prefixdb.New(validatorsDBPrefix, vm.db) - vm.validatorState, err = validators.NewState(validatorsDB) + vm.validatorState, err = validators.NewState(vm.validatorsDB) if err != nil { return fmt.Errorf("failed to initialize validator state: %w", err) } @@ -1340,6 +1341,7 @@ func (vm *VM) initializeDBs(avaDB database.Database) error { // [warpDB] is used to store warp message signatures // set to a prefixDB with the prefix [warpPrefix] vm.warpDB = prefixdb.New(warpPrefix, db) + vm.validatorsDB = prefixdb.New(validatorsDBPrefix, db) return nil } diff --git a/plugin/evm/vm_validators_state_test.go b/plugin/evm/vm_validators_state_test.go index 4747fb0fbf..214168475f 100644 --- a/plugin/evm/vm_validators_state_test.go +++ b/plugin/evm/vm_validators_state_test.go @@ -2,7 +2,6 @@ package evm import ( "context" - "fmt" "testing" "time" @@ -97,15 +96,13 @@ func TestValidatorState(t *testing.T) { require.False(vm.uptimeManager.StartedTracking()) vm = &VM{} - validatorsLoadFrequency := 5 * time.Second - configJSON := fmt.Sprintf(`{"load-validators-frequency": %g}`, validatorsLoadFrequency.Seconds()) err = vm.Initialize( context.Background(), NewContext(), // this context does not have validators state, making VM to source it from the database dbManager, genesisBytes, []byte(""), - 
[]byte(configJSON), + []byte(""), issuer, []*commonEng.Fx{}, appSender, @@ -156,7 +153,7 @@ func TestValidatorState(t *testing.T) { newValidator, err := vm.validatorState.GetValidator(newNodeID) assert.NoError(c, err) assert.Equal(c, newNodeID, newValidator.NodeID) - }, validatorsLoadFrequency*2, validatorsLoadFrequency/2) + }, loadValidatorsFrequency*2, 5*time.Second) } func TestLoadNewValidators(t *testing.T) { From af39b2c3f0d42513c4f888a9a0259aa5a8e548e6 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 30 Oct 2024 21:44:56 +0300 Subject: [PATCH 64/98] remove errs from resume and pause --- plugin/evm/uptime/pausable_manager.go | 12 +----------- plugin/evm/uptime/pausable_manager_test.go | 8 ++++++++ 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/plugin/evm/uptime/pausable_manager.go b/plugin/evm/uptime/pausable_manager.go index 18370d41f6..89ec278990 100644 --- a/plugin/evm/uptime/pausable_manager.go +++ b/plugin/evm/uptime/pausable_manager.go @@ -16,11 +16,7 @@ import ( var _ validators.StateCallbackListener = &pausableManager{} -var ( - errPausedDisconnect = errors.New("paused node cannot be disconnected") - errAlreadyPaused = errors.New("node is already paused") - errNotPaused = errors.New("node is not paused") -) +var errPausedDisconnect = errors.New("paused node cannot be disconnected") type PausableManager interface { uptime.Manager @@ -127,9 +123,6 @@ func (p *pausableManager) IsPaused(nodeID ids.NodeID) bool { // pause can disconnect the node from the uptime.Manager if it is connected. // Returns an error if the node is already paused. func (p *pausableManager) pause(nodeID ids.NodeID) error { - if p.IsPaused(nodeID) { - return errAlreadyPaused - } p.pausedVdrs.Add(nodeID) if p.Manager.IsConnected(nodeID) { // If the node is connected, then we need to disconnect it from @@ -145,9 +138,6 @@ func (p *pausableManager) pause(nodeID ids.NodeID) error { // resume can connect the node to the uptime.Manager if it was connected. // Returns an error if the node is not paused. 
func (p *pausableManager) resume(nodeID ids.NodeID) error { - if !p.IsPaused(nodeID) { - return errNotPaused - } p.pausedVdrs.Remove(nodeID) if p.connectedVdrs.Contains(nodeID) && !p.Manager.IsConnected(nodeID) { return p.Manager.Connect(nodeID) diff --git a/plugin/evm/uptime/pausable_manager_test.go b/plugin/evm/uptime/pausable_manager_test.go index 03a6228d22..b1362cf574 100644 --- a/plugin/evm/uptime/pausable_manager_test.go +++ b/plugin/evm/uptime/pausable_manager_test.go @@ -29,6 +29,7 @@ func TestPausableManager(t *testing.T) { // Pause before tracking up.OnValidatorStatusUpdated(vID, nodeID0, false) + require.True(up.IsPaused(nodeID0)) // Elapse Time addTime(clk, time.Second) @@ -59,6 +60,7 @@ func TestPausableManager(t *testing.T) { // Pause addTime(clk, time.Second) up.OnValidatorStatusUpdated(vID, nodeID0, false) + require.True(up.IsPaused(nodeID0)) // Elapse time currentTime := addTime(clk, 2*time.Second) @@ -81,6 +83,7 @@ func TestPausableManager(t *testing.T) { // Resume and check uptime currentTime = addTime(clk, 6*time.Second) up.OnValidatorStatusUpdated(vID, nodeID0, true) + require.False(up.IsPaused(nodeID0)) // Uptime should not have increased since the node was paused // and we just resumed it checkUptime(t, up, nodeID0, 1*time.Second, currentTime) @@ -97,6 +100,7 @@ func TestPausableManager(t *testing.T) { // Pause before tracking up.OnValidatorStatusUpdated(vID, nodeID0, false) + require.True(up.IsPaused(nodeID0)) // Start tracking addTime(clk, time.Second) @@ -121,6 +125,7 @@ func TestPausableManager(t *testing.T) { require.NoError(up.Connect(nodeID0)) addTime(clk, 5*time.Second) up.OnValidatorStatusUpdated(vID, nodeID0, true) + require.False(up.IsPaused(nodeID0)) // Check uptime after resume currentTime = addTime(clk, 6*time.Second) @@ -140,6 +145,7 @@ func TestPausableManager(t *testing.T) { // Pause and check uptime currentTime := addTime(clk, 2*time.Second) up.OnValidatorStatusUpdated(vID, nodeID0, false) + require.True(up.IsPaused(nodeID0)) // Uptime should be 2 seconds since the node was paused after 2 seconds checkUptime(t, up, nodeID0, 2*time.Second, currentTime) @@ -154,12 +160,14 @@ func TestPausableManager(t *testing.T) { // Pause and check uptime up.OnValidatorStatusUpdated(vID, nodeID0, false) + require.True(up.IsPaused(nodeID0)) // Uptime should not have increased since the node was paused checkUptime(t, up, nodeID0, 2*time.Second, currentTime) // Resume and check uptime currentTime = addTime(clk, 5*time.Second) up.OnValidatorStatusUpdated(vID, nodeID0, true) + require.False(up.IsPaused(nodeID0)) // Uptime should have increased by 5 seconds since the node was resumed checkUptime(t, up, nodeID0, 7*time.Second, currentTime) From d5d35453bc2946d3da4e09ce9a43e4aa6d382923 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 30 Oct 2024 21:59:26 +0300 Subject: [PATCH 65/98] check after stopping --- plugin/evm/uptime/pausable_manager_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugin/evm/uptime/pausable_manager_test.go b/plugin/evm/uptime/pausable_manager_test.go index b1362cf574..6b38b96139 100644 --- a/plugin/evm/uptime/pausable_manager_test.go +++ b/plugin/evm/uptime/pausable_manager_test.go @@ -152,6 +152,7 @@ func TestPausableManager(t *testing.T) { // Stop tracking and reinitialize manager currentTime = addTime(clk, 3*time.Second) require.NoError(up.StopTracking([]ids.NodeID{nodeID0})) + checkUptime(t, up, nodeID0, 2*time.Second, currentTime) up = NewPausableManager(uptime.NewManager(s, clk)) // Uptime should not have increased since the 
node was paused @@ -206,6 +207,7 @@ func addTime(clk *mockable.Clock, duration time.Duration) time.Time { } func checkUptime(t *testing.T, up PausableManager, nodeID ids.NodeID, expectedUptime time.Duration, expectedLastUpdate time.Time) { + t.Helper() uptime, lastUpdated, err := up.CalculateUptime(nodeID) require.NoError(t, err) require.Equal(t, expectedLastUpdate.Unix(), lastUpdated.Unix()) From 6fffc2b519296a4d535c897bc53bbd81c98e7112 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 31 Oct 2024 14:28:17 +0300 Subject: [PATCH 66/98] use expectedTime in tests --- plugin/evm/uptime/pausable_manager_test.go | 50 +++++++++++++--------- 1 file changed, 30 insertions(+), 20 deletions(-) diff --git a/plugin/evm/uptime/pausable_manager_test.go b/plugin/evm/uptime/pausable_manager_test.go index 6b38b96139..14c0b3b9d4 100644 --- a/plugin/evm/uptime/pausable_manager_test.go +++ b/plugin/evm/uptime/pausable_manager_test.go @@ -38,12 +38,13 @@ func TestPausableManager(t *testing.T) { require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) currentTime := addTime(clk, time.Second) // Uptime should not have increased since the node was paused - checkUptime(t, up, nodeID0, 0*time.Second, currentTime) + expectedUptime := 0 * time.Second + checkUptime(t, up, nodeID0, expectedUptime, currentTime) // Disconnect require.NoError(up.Disconnect(nodeID0)) // Uptime should not have increased - checkUptime(t, up, nodeID0, 0*time.Second, currentTime) + checkUptime(t, up, nodeID0, expectedUptime, currentTime) } // Case 2: Start tracking, connect, pause, re-connect, resume @@ -54,31 +55,32 @@ func TestPausableManager(t *testing.T) { require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) // Connect - addTime(clk, time.Second) + addTime(clk, 1*time.Second) require.NoError(up.Connect(nodeID0)) // Pause - addTime(clk, time.Second) + addTime(clk, 1*time.Second) up.OnValidatorStatusUpdated(vID, nodeID0, false) require.True(up.IsPaused(nodeID0)) // Elapse time currentTime := addTime(clk, 2*time.Second) // Uptime should be 1 second since the node was paused after 1 sec - checkUptime(t, up, nodeID0, 1*time.Second, currentTime) + expectedUptime := 1 * time.Second + checkUptime(t, up, nodeID0, expectedUptime, currentTime) // Disconnect and check uptime currentTime = addTime(clk, 3*time.Second) require.NoError(up.Disconnect(nodeID0)) // Uptime should not have increased since the node was paused - checkUptime(t, up, nodeID0, 1*time.Second, currentTime) + checkUptime(t, up, nodeID0, expectedUptime, currentTime) // Connect again and check uptime addTime(clk, 4*time.Second) require.NoError(up.Connect(nodeID0)) currentTime = addTime(clk, 5*time.Second) // Uptime should not have increased since the node was paused - checkUptime(t, up, nodeID0, 1*time.Second, currentTime) + checkUptime(t, up, nodeID0, expectedUptime, currentTime) // Resume and check uptime currentTime = addTime(clk, 6*time.Second) @@ -86,12 +88,13 @@ func TestPausableManager(t *testing.T) { require.False(up.IsPaused(nodeID0)) // Uptime should not have increased since the node was paused // and we just resumed it - checkUptime(t, up, nodeID0, 1*time.Second, currentTime) + checkUptime(t, up, nodeID0, expectedUptime, currentTime) // Elapsed time check currentTime = addTime(clk, 7*time.Second) // Uptime should increase by 7 seconds above since the node was resumed - checkUptime(t, up, nodeID0, 8*time.Second, currentTime) + expectedUptime += 7 * time.Second + checkUptime(t, up, nodeID0, expectedUptime, currentTime) } // Case 3: Pause, start tracking, connect, 
re-connect, resume @@ -112,13 +115,14 @@ func TestPausableManager(t *testing.T) { currentTime := addTime(clk, 2*time.Second) // Uptime should not have increased since the node was paused - checkUptime(t, up, nodeID0, 0*time.Second, currentTime) + expectedUptime := 0 * time.Second + checkUptime(t, up, nodeID0, expectedUptime, currentTime) // Disconnect and check uptime currentTime = addTime(clk, 3*time.Second) require.NoError(up.Disconnect(nodeID0)) // Uptime should not have increased since the node was paused - checkUptime(t, up, nodeID0, 0*time.Second, currentTime) + checkUptime(t, up, nodeID0, expectedUptime, currentTime) // Connect again and resume addTime(clk, 4*time.Second) @@ -130,7 +134,8 @@ func TestPausableManager(t *testing.T) { // Check uptime after resume currentTime = addTime(clk, 6*time.Second) // Uptime should have increased by 6 seconds since the node was resumed - checkUptime(t, up, nodeID0, 6*time.Second, currentTime) + expectedUptime += 6 * time.Second + checkUptime(t, up, nodeID0, expectedUptime, currentTime) } // Case 4: Start tracking, connect, pause, stop tracking, resume tracking. @@ -147,48 +152,53 @@ func TestPausableManager(t *testing.T) { up.OnValidatorStatusUpdated(vID, nodeID0, false) require.True(up.IsPaused(nodeID0)) // Uptime should be 2 seconds since the node was paused after 2 seconds - checkUptime(t, up, nodeID0, 2*time.Second, currentTime) + expectedUptime := 2 * time.Second + + checkUptime(t, up, nodeID0, expectedUptime, currentTime) // Stop tracking and reinitialize manager currentTime = addTime(clk, 3*time.Second) require.NoError(up.StopTracking([]ids.NodeID{nodeID0})) - checkUptime(t, up, nodeID0, 2*time.Second, currentTime) + checkUptime(t, up, nodeID0, expectedUptime, currentTime) up = NewPausableManager(uptime.NewManager(s, clk)) // Uptime should not have increased since the node was paused // and we have not started tracking again - checkUptime(t, up, nodeID0, 2*time.Second, currentTime) + checkUptime(t, up, nodeID0, expectedUptime, currentTime) // Pause and check uptime up.OnValidatorStatusUpdated(vID, nodeID0, false) require.True(up.IsPaused(nodeID0)) // Uptime should not have increased since the node was paused - checkUptime(t, up, nodeID0, 2*time.Second, currentTime) + checkUptime(t, up, nodeID0, expectedUptime, currentTime) // Resume and check uptime currentTime = addTime(clk, 5*time.Second) up.OnValidatorStatusUpdated(vID, nodeID0, true) require.False(up.IsPaused(nodeID0)) // Uptime should have increased by 5 seconds since the node was resumed - checkUptime(t, up, nodeID0, 7*time.Second, currentTime) + expectedUptime += 5 * time.Second + checkUptime(t, up, nodeID0, expectedUptime, currentTime) // Start tracking and check elapsed time currentTime = addTime(clk, 6*time.Second) require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) // Uptime should have increased by 6 seconds since we started tracking // and node was resumed (we assume the node was online until we started tracking) - checkUptime(t, up, nodeID0, 13*time.Second, currentTime) + expectedUptime += 6 * time.Second + checkUptime(t, up, nodeID0, expectedUptime, currentTime) // Elapsed time currentTime = addTime(clk, 7*time.Second) // Uptime should not have increased since the node was not connected - checkUptime(t, up, nodeID0, 13*time.Second, currentTime) + checkUptime(t, up, nodeID0, expectedUptime, currentTime) // Connect and final uptime check require.NoError(up.Connect(nodeID0)) currentTime = addTime(clk, 8*time.Second) // Uptime should have increased by 8 seconds since the 
node was connected - checkUptime(t, up, nodeID0, 21*time.Second, currentTime) + expectedUptime += 8 * time.Second + checkUptime(t, up, nodeID0, expectedUptime, currentTime) } } From 4625f7c0f165abb77620e87d4b2907509c830df8 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 31 Oct 2024 14:49:34 +0300 Subject: [PATCH 67/98] reviews --- plugin/evm/vm.go | 1 - plugin/evm/vm_validators_state_test.go | 8 ++++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index bcfb7bbb33..cbfad2a260 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -1462,7 +1462,6 @@ func (vm *VM) performValidatorUpdate(ctx context.Context) error { return nil } -// TODO: cache the last updated height and then load if needed // loadValidators loads the [validators] into the validator state [validatorState] func loadValidators(validatorState validators.State, validators map[ids.ID]*avalancheValidators.GetCurrentValidatorOutput) error { currentValidationIDs := validatorState.GetValidationIDs() diff --git a/plugin/evm/vm_validators_state_test.go b/plugin/evm/vm_validators_state_test.go index 214168475f..6b0872d72a 100644 --- a/plugin/evm/vm_validators_state_test.go +++ b/plugin/evm/vm_validators_state_test.go @@ -77,15 +77,15 @@ func TestValidatorState(t *testing.T) { require.NoError(err, "error initializing GenesisVM") // Test case 1: state should not be populated until bootstrapped + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) require.Equal(0, vm.validatorState.GetValidationIDs().Len()) _, _, err = vm.uptimeManager.CalculateUptime(testNodeIDs[0]) require.ErrorIs(database.ErrNotFound, err) require.False(vm.uptimeManager.StartedTracking()) // Test case 2: state should be populated after bootstrapped - require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) require.NoError(vm.SetState(context.Background(), snow.NormalOp)) - require.Equal(3, vm.validatorState.GetValidationIDs().Len()) + require.Len(vm.validatorState.GetValidationIDs(), 3) _, _, err = vm.uptimeManager.CalculateUptime(testNodeIDs[0]) require.NoError(err) require.True(vm.uptimeManager.StartedTracking()) @@ -108,7 +108,7 @@ func TestValidatorState(t *testing.T) { appSender, ) require.NoError(err, "error initializing GenesisVM") - require.Equal(3, vm.validatorState.GetValidationIDs().Len()) + require.Len(vm.validatorState.GetValidationIDs().Len(), 3) _, _, err = vm.uptimeManager.CalculateUptime(testNodeIDs[0]) require.NoError(err) require.False(vm.uptimeManager.StartedTracking()) @@ -149,7 +149,7 @@ func TestValidatorState(t *testing.T) { // new validator should be added to the state eventually after validatorsLoadFrequency require.EventuallyWithT(func(c *assert.CollectT) { - assert.Equal(c, 4, vm.validatorState.GetValidatorIDs().Len()) + assert.Len(c, vm.validatorState.GetValidatorIDs(), 4) newValidator, err := vm.validatorState.GetValidator(newNodeID) assert.NoError(c, err) assert.Equal(c, newNodeID, newValidator.NodeID) From e1975cf0921529d9ce719eaa0aca7e195fe2b8f3 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 31 Oct 2024 14:51:42 +0300 Subject: [PATCH 68/98] Update plugin/evm/vm.go Co-authored-by: Darioush Jalali Signed-off-by: Ceyhun Onur --- plugin/evm/vm.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 3b0ba3901f..01a640870a 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -1086,7 +1086,7 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]http.Handler, error) { if 
vm.config.ValidatorsAPIEnabled { validatorsAPI, err := newHandler("validators", &ValidatorsAPI{vm}) if err != nil { - return nil, fmt.Errorf("failed to register service for admin API due to %w", err) + return nil, fmt.Errorf("failed to register service for validators API due to %w", err) } apis[validatorsEndpoint] = validatorsAPI enabledAPIs = append(enabledAPIs, "validators") From 74bd249c2d5adec85f9a305b27b883ba71e0f542 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 31 Oct 2024 17:25:01 +0300 Subject: [PATCH 69/98] fix len --- plugin/evm/vm_validators_state_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/vm_validators_state_test.go b/plugin/evm/vm_validators_state_test.go index 6b0872d72a..00d7c9df7c 100644 --- a/plugin/evm/vm_validators_state_test.go +++ b/plugin/evm/vm_validators_state_test.go @@ -108,7 +108,7 @@ func TestValidatorState(t *testing.T) { appSender, ) require.NoError(err, "error initializing GenesisVM") - require.Len(vm.validatorState.GetValidationIDs().Len(), 3) + require.Len(vm.validatorState.GetValidationIDs(), 3) _, _, err = vm.uptimeManager.CalculateUptime(testNodeIDs[0]) require.NoError(err) require.False(vm.uptimeManager.StartedTracking()) From 0a9810118f9298cb0f10b491378e38a49d84af49 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 1 Nov 2024 16:58:09 +0300 Subject: [PATCH 70/98] fix tests --- plugin/evm/service.go | 2 +- plugin/evm/vm_validators_state_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/evm/service.go b/plugin/evm/service.go index 52ffb11329..f434b09ca3 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -39,7 +39,7 @@ func (api *ValidatorsAPI) GetCurrentValidators(_ *http.Request, args *GetCurrent nodeIDs := set.Of(args.NodeIDs...) 
if nodeIDs.Len() == 0 { - nodeIDs = api.vm.validatorState.GetValidatorIDs() + nodeIDs = api.vm.validatorState.GetNodeIDs() } reply.Validators = make([]CurrentValidator, 0, nodeIDs.Len()) diff --git a/plugin/evm/vm_validators_state_test.go b/plugin/evm/vm_validators_state_test.go index 00d7c9df7c..87bb7bcac6 100644 --- a/plugin/evm/vm_validators_state_test.go +++ b/plugin/evm/vm_validators_state_test.go @@ -149,7 +149,7 @@ func TestValidatorState(t *testing.T) { // new validator should be added to the state eventually after validatorsLoadFrequency require.EventuallyWithT(func(c *assert.CollectT) { - assert.Len(c, vm.validatorState.GetValidatorIDs(), 4) + assert.Len(c, vm.validatorState.GetNodeIDs(), 4) newValidator, err := vm.validatorState.GetValidator(newNodeID) assert.NoError(c, err) assert.Equal(c, newNodeID, newValidator.NodeID) From 52b83e288957d2bc6716c16372242174c1414697 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Mon, 4 Nov 2024 12:20:25 +0300 Subject: [PATCH 71/98] update avago branch --- go.mod | 29 ++++++++++- go.sum | 67 +++++++++++++++++++++++--- plugin/evm/vm.go | 2 +- plugin/evm/vm_validators_state_test.go | 8 +-- 4 files changed, 93 insertions(+), 13 deletions(-) diff --git a/go.mod b/go.mod index eb7519162f..b329ca1863 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22.8 require ( github.com/VictoriaMetrics/fastcache v1.12.1 github.com/antithesishq/antithesis-sdk-go v0.3.8 - github.com/ava-labs/avalanchego v1.11.13-0.20241029131257-e05aa37abaf9 + github.com/ava-labs/avalanchego v1.11.13-0.20241104091041-0b7a136fba6b github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 @@ -76,17 +76,23 @@ require ( github.com/dlclark/regexp2 v1.7.0 // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/ethereum/c-kzg-4844 v0.4.0 // indirect github.com/getsentry/sentry-go v0.18.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/btree v1.1.2 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect github.com/google/renameio/v2 v2.0.0 // indirect github.com/gorilla/mux v1.8.0 // indirect @@ -96,17 +102,25 @@ require ( github.com/huin/goupnp v1.3.0 // indirect 
github.com/jackpal/gateway v1.0.6 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.15.15 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/magiconair/properties v1.8.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-runewidth v0.0.13 // indirect github.com/mattn/go-shellwords v1.0.12 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect + github.com/moby/spdystream v0.2.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mr-tron/base58 v1.2.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect @@ -124,7 +138,7 @@ require ( github.com/spf13/afero v1.8.2 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/subosito/gotenv v1.3.0 // indirect - github.com/supranational/blst v0.3.11 // indirect + github.com/supranational/blst v0.3.13 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect @@ -141,14 +155,25 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect golang.org/x/net v0.28.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/term v0.23.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect gonum.org/v1/gonum v0.11.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed // indirect google.golang.org/grpc v1.66.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.29.0 // indirect + k8s.io/apimachinery v0.29.0 // indirect + k8s.io/client-go v0.29.0 // indirect + k8s.io/klog/v2 v2.110.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect rsc.io/tmplfunc v0.0.3 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index c5d14ece7a..ca120b2521 100644 
--- a/go.sum +++ b/go.sum @@ -58,10 +58,10 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax github.com/antithesishq/antithesis-sdk-go v0.3.8 h1:OvGoHxIcOXFJLyn9IJQ5DzByZ3YVAWNBc394ObzDRb8= github.com/antithesishq/antithesis-sdk-go v0.3.8/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/avalanchego v1.11.13-0.20241026214739-acb3d7d102a0 h1:1T9OnvZP6XZ62EVWlfmrI8rrudyE6bM2Zt51pCHfS5o= -github.com/ava-labs/avalanchego v1.11.13-0.20241026214739-acb3d7d102a0/go.mod h1:gYlTU42Q4b29hzhUN22yclym5qwB3Si0jh4+LTn7DZM= -github.com/ava-labs/avalanchego v1.11.13-0.20241029131257-e05aa37abaf9 h1:rX5Xn5WBPppyvvZXp8wwx9dX+4fBY2/XeNIEm18ifzk= -github.com/ava-labs/avalanchego v1.11.13-0.20241029131257-e05aa37abaf9/go.mod h1:gYlTU42Q4b29hzhUN22yclym5qwB3Si0jh4+LTn7DZM= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/ava-labs/avalanchego v1.11.13-0.20241104091041-0b7a136fba6b h1:qvjqst/9X/c4tvI6nFzOwoTeRVIOMPAtEf1liDYrlhI= +github.com/ava-labs/avalanchego v1.11.13-0.20241104091041-0b7a136fba6b/go.mod h1:86tO6F1FT8emclUwdQ2WCwAtAerqjm5A4IbV6XxNUyM= github.com/ava-labs/coreth v0.13.8 h1:f14X3KgwHl9LwzfxlN6S4bbn5VA2rhEsNnHaRLSTo/8= github.com/ava-labs/coreth v0.13.8/go.mod h1:t3BSv/eQv0AlDPMfEDCMMoD/jq1RkUsbFzQAFg5qBcE= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= @@ -173,6 +173,8 @@ github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -214,6 +216,7 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-logr/logr v1.2.2/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -222,6 +225,12 @@ github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AE github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= @@ -280,6 +289,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -290,10 +301,13 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -326,6 +340,7 @@ github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB7 github.com/gorilla/rpc v1.2.0 h1:WvvdC2lNeT1SP32zrIce5l0ECBfbAlmrmSBsuc57wfk= github.com/gorilla/rpc v1.2.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= @@ -367,9 +382,13 @@ github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7Bd github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -406,6 +425,8 @@ github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.6 
h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= @@ -439,13 +460,22 @@ github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8oh github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= @@ -548,6 +578,7 @@ github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobt github.com/status-im/keycard-go v0.2.0/go.mod 
h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -557,12 +588,13 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= -github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= -github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk= +github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5fl+nylMaIr9PVV1w343YRDtsy+Rwu7XI= github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= @@ -742,6 +774,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1022,6 +1056,8 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod 
h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -1033,6 +1069,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -1050,8 +1087,26 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= +k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= +k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= +k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= +k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= +k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml 
v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index b988ecb34d..8d3d09075f 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -1456,7 +1456,7 @@ func (vm *VM) performValidatorUpdate(ctx context.Context) error { now := time.Now() log.Debug("performing validator update") // get current validator set - currentValidatorSet, _, _, err := vm.ctx.ValidatorState.GetCurrentValidatorSet(ctx, vm.ctx.SubnetID) + currentValidatorSet, _, err := vm.ctx.ValidatorState.GetCurrentValidatorSet(ctx, vm.ctx.SubnetID) if err != nil { return fmt.Errorf("failed to get current validator set: %w", err) } diff --git a/plugin/evm/vm_validators_state_test.go b/plugin/evm/vm_validators_state_test.go index 87bb7bcac6..fecd79d5df 100644 --- a/plugin/evm/vm_validators_state_test.go +++ b/plugin/evm/vm_validators_state_test.go @@ -42,7 +42,7 @@ func TestValidatorState(t *testing.T) { ids.GenerateTestID(), } ctx.ValidatorState = &validatorstest.State{ - GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagoValidators.GetCurrentValidatorOutput, uint64, bool, error) { + GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagoValidators.GetCurrentValidatorOutput, uint64, error) { return map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ testValidationIDs[0]: { NodeID: testNodeIDs[0], @@ -59,7 +59,7 @@ func TestValidatorState(t *testing.T) { PublicKey: nil, Weight: 1, }, - }, 0, false, nil + }, 0, nil }, } appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } @@ -117,7 +117,7 @@ func TestValidatorState(t *testing.T) { newValidationID := ids.GenerateTestID() newNodeID := ids.GenerateTestNodeID() testState := &validatorstest.State{ - GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagoValidators.GetCurrentValidatorOutput, uint64, bool, error) { + GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagoValidators.GetCurrentValidatorOutput, uint64, error) { return map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ testValidationIDs[0]: { NodeID: testNodeIDs[0], @@ -139,7 +139,7 @@ func TestValidatorState(t *testing.T) { PublicKey: nil, Weight: 1, }, - }, 0, false, nil + }, 0, nil }, } vm.ctx.ValidatorState = testState From 93bedf3efcbe47a7851f753ba16bec3890e59459 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 5 Nov 2024 12:33:47 +0300 Subject: [PATCH 72/98] use ctx from utils --- plugin/evm/vm_test.go | 55 +++----------------------- plugin/evm/vm_validators_state_test.go | 3 +- plugin/evm/vm_warp_test.go | 4 +- scripts/versions.sh | 2 +- utils/snow.go | 41 +++++++++++++++++-- 5 files changed, 48 insertions(+), 57 deletions(-) diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index d69bf0af95..d679d9e1a5 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -33,11 +33,7 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/enginetest" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/snow/validators/validatorstest" "github.com/ava-labs/avalanchego/upgrade" - avalancheConstants "github.com/ava-labs/avalanchego/utils/constants" - 
"github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/components/chain" @@ -63,17 +59,14 @@ import ( "github.com/ava-labs/subnet-evm/vmerrs" avagoconstants "github.com/ava-labs/avalanchego/utils/constants" - avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" ) var ( - testNetworkID uint32 = avagoconstants.UnitTestID - testCChainID = ids.ID{'c', 'c', 'h', 'a', 'i', 'n', 't', 'e', 's', 't'} - testXChainID = ids.ID{'t', 'e', 's', 't', 'x'} - testMinGasPrice int64 = 225_000_000_000 + testNetworkID uint32 = avagoconstants.UnitTestID + + testMinGasPrice int64 = 225_000_000_000 testKeys []*ecdsa.PrivateKey testEthAddrs []common.Address // testEthAddrs[i] corresponds to testKeys[i] - testAvaxAssetID = ids.ID{1, 2, 3} username = "Johns" password = "CjasdjhiPeirbSenfeI13" // #nosec G101 @@ -139,44 +132,6 @@ func buildGenesisTest(t *testing.T, genesisJSON string) []byte { return genesisBytes } -func NewContext() *snow.Context { - ctx := utils.TestSnowContext() - ctx.NodeID = ids.GenerateTestNodeID() - ctx.NetworkID = testNetworkID - ctx.ChainID = testCChainID - ctx.AVAXAssetID = testAvaxAssetID - ctx.XChainID = testXChainID - aliaser := ctx.BCLookup.(ids.Aliaser) - _ = aliaser.Alias(testCChainID, "C") - _ = aliaser.Alias(testCChainID, testCChainID.String()) - _ = aliaser.Alias(testXChainID, "X") - _ = aliaser.Alias(testXChainID, testXChainID.String()) - ctx.ValidatorState = &validatorstest.State{ - GetCurrentHeightF: func(ctx context.Context) (uint64, error) { return 0, nil }, - GetValidatorSetF: func(ctx context.Context, height uint64, subnetID ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { - return map[ids.NodeID]*validators.GetValidatorOutput{}, nil - }, - GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { - subnetID, ok := map[ids.ID]ids.ID{ - avalancheConstants.PlatformChainID: avalancheConstants.PrimaryNetworkID, - testXChainID: avalancheConstants.PrimaryNetworkID, - testCChainID: avalancheConstants.PrimaryNetworkID, - }[chainID] - if !ok { - return ids.Empty, errors.New("unknown chain") - } - return subnetID, nil - }, - } - blsSecretKey, err := bls.NewSecretKey() - if err != nil { - panic(err) - } - ctx.WarpSigner = avalancheWarp.NewSigner(blsSecretKey, ctx.NetworkID, ctx.ChainID) - ctx.PublicKey = bls.PublicFromSecretKey(blsSecretKey) - return ctx -} - // setupGenesis sets up the genesis // If [genesisJSON] is empty, defaults to using [genesisJSONLatest] func setupGenesis( @@ -192,7 +147,7 @@ func setupGenesis( genesisJSON = genesisJSONLatest } genesisBytes := buildGenesisTest(t, genesisJSON) - ctx := NewContext() + ctx := utils.TestSnowContext() // initialize the atomic memory atomicMemory := atomic.NewMemory(prefixdb.New([]byte{0}, memdb.New())) @@ -495,7 +450,7 @@ func TestBuildEthTxBlock(t *testing.T) { if err := restartedVM.Initialize( context.Background(), - NewContext(), + utils.TestSnowContext(), dbManager, genesisBytes, []byte(""), diff --git a/plugin/evm/vm_validators_state_test.go b/plugin/evm/vm_validators_state_test.go index fecd79d5df..f850b58a28 100644 --- a/plugin/evm/vm_validators_state_test.go +++ b/plugin/evm/vm_validators_state_test.go @@ -15,6 +15,7 @@ import ( "github.com/ava-labs/avalanchego/snow/validators/validatorstest" 
"github.com/ava-labs/subnet-evm/core" "github.com/ava-labs/subnet-evm/plugin/evm/validators" + "github.com/ava-labs/subnet-evm/utils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" @@ -98,7 +99,7 @@ func TestValidatorState(t *testing.T) { vm = &VM{} err = vm.Initialize( context.Background(), - NewContext(), // this context does not have validators state, making VM to source it from the database + utils.TestSnowContext(), // this context does not have validators state, making VM to source it from the database dbManager, genesisBytes, []byte(""), diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index b80af6fdd5..8e207b1e60 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -481,7 +481,7 @@ func TestReceiveWarpMessage(t *testing.T) { }, { name: "C-Chain message should be signed by subnet without RequirePrimaryNetworkSigners", - sourceChainID: testCChainID, + sourceChainID: utils.TestCChainID, msgFrom: fromPrimary, useSigners: signersSubnet, blockTime: upgrade.InitiallyActiveTime.Add(2 * blockGap), @@ -504,7 +504,7 @@ func TestReceiveWarpMessage(t *testing.T) { }, { name: "C-Chain message should be signed by primary with RequirePrimaryNetworkSigners (impacted)", - sourceChainID: testCChainID, + sourceChainID: utils.TestCChainID, msgFrom: fromPrimary, useSigners: signersPrimary, blockTime: reEnableTime.Add(2 * blockGap), diff --git a/scripts/versions.sh b/scripts/versions.sh index fa04254c77..24da5f9fbd 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -4,7 +4,7 @@ # shellcheck disable=SC2034 # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'e05aa37a'} +AVALANCHE_VERSION=${AVALANCHE_VERSION:-'0b7a136'} GINKGO_VERSION=${GINKGO_VERSION:-'v2.2.0'} # This won't be used, but it's here to make code syncs easier diff --git a/utils/snow.go b/utils/snow.go index 090d38102f..7f5f95e9e6 100644 --- a/utils/snow.go +++ b/utils/snow.go @@ -5,6 +5,7 @@ package utils import ( "context" + "errors" "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/ids" @@ -15,6 +16,12 @@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" +) + +var ( + TestCChainID = ids.ID{'c', 'c', 'h', 'a', 'i', 'n', 't', 'e', 's', 't'} + testXChainID = ids.ID{'t', 'e', 's', 't', 'x'} ) func TestSnowContext() *snow.Context { @@ -23,19 +30,33 @@ func TestSnowContext() *snow.Context { panic(err) } pk := bls.PublicFromSecretKey(sk) - return &snow.Context{ - NetworkID: constants.UnitTestID, + networkID := constants.UnitTestID + chainID := ids.Empty + + ctx := &snow.Context{ + NetworkID: networkID, SubnetID: ids.Empty, - ChainID: ids.Empty, + ChainID: chainID, NodeID: ids.EmptyNodeID, + XChainID: testXChainID, + CChainID: TestCChainID, NetworkUpgrades: upgradetest.GetConfig(upgradetest.Latest), PublicKey: pk, + WarpSigner: warp.NewSigner(sk, networkID, chainID), Log: logging.NoLog{}, BCLookup: ids.NewAliaser(), Metrics: metrics.NewPrefixGatherer(), ChainDataDir: "", ValidatorState: NewTestValidatorState(), } + + aliaser := ctx.BCLookup.(ids.Aliaser) + _ = aliaser.Alias(TestCChainID, "C") + _ = 
aliaser.Alias(TestCChainID, TestCChainID.String()) + _ = aliaser.Alias(testXChainID, "X") + _ = aliaser.Alias(testXChainID, testXChainID.String()) + + return ctx } func NewTestValidatorState() *validatorstest.State { @@ -43,8 +64,22 @@ func NewTestValidatorState() *validatorstest.State { GetCurrentHeightF: func(context.Context) (uint64, error) { return 0, nil }, + GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { + subnetID, ok := map[ids.ID]ids.ID{ + constants.PlatformChainID: constants.PrimaryNetworkID, + testXChainID: constants.PrimaryNetworkID, + TestCChainID: constants.PrimaryNetworkID, + }[chainID] + if !ok { + return ids.Empty, errors.New("unknown chain") + } + return subnetID, nil + }, GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { return make(map[ids.NodeID]*validators.GetValidatorOutput), nil }, + GetCurrentValidatorSetF: func(context.Context, ids.ID) (map[ids.ID]*validators.GetCurrentValidatorOutput, uint64, error) { + return make(map[ids.ID]*validators.GetCurrentValidatorOutput), 0, nil + }, } } From 2ebf56bdf1ae5ab8c9837c4eee7df5793113c16f Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 5 Nov 2024 12:40:11 +0300 Subject: [PATCH 73/98] add empty check for source address --- warp/verifier_backend.go | 11 +++++++++-- warp/verifier_backend_test.go | 20 +++++++++++++------- 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/warp/verifier_backend.go b/warp/verifier_backend.go index 6890d876d9..97ae9d93d9 100644 --- a/warp/verifier_backend.go +++ b/warp/verifier_backend.go @@ -81,7 +81,7 @@ func (b *backend) verifyAddressedCall(addressedCall *payload.AddressedCall) *com switch p := parsed.(type) { case *messages.ValidatorUptime: - if err := b.verifyUptimeMessage(p); err != nil { + if err := b.verifyUptimeMessage(addressedCall.SourceAddress, p); err != nil { b.stats.IncUptimeValidationFail() return err } @@ -96,7 +96,14 @@ func (b *backend) verifyAddressedCall(addressedCall *payload.AddressedCall) *com return nil } -func (b *backend) verifyUptimeMessage(uptimeMsg *messages.ValidatorUptime) *common.AppError { +func (b *backend) verifyUptimeMessage(sourceAddress []byte, uptimeMsg *messages.ValidatorUptime) *common.AppError { + if len(sourceAddress) != 0 { + return &common.AppError{ + Code: VerifyErrCode, + Message: "source address should be empty for uptime message", + } + } + // first get the validator's nodeID nodeID, err := b.validatorState.GetNodeID(uptimeMsg.ValidationID) if err != nil { diff --git a/warp/verifier_backend_test.go b/warp/verifier_backend_test.go index 2f4a8069df..952bf8c3f5 100644 --- a/warp/verifier_backend_test.go +++ b/warp/verifier_backend_test.go @@ -267,10 +267,10 @@ func TestUptimeSignatures(t *testing.T) { require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(blsSecretKey, snowCtx.NetworkID, snowCtx.ChainID) - getUptimeMessageBytes := func(vID ids.ID, totalUptime uint64) ([]byte, *avalancheWarp.UnsignedMessage) { + getUptimeMessageBytes := func(sourceAddress []byte, vID ids.ID, totalUptime uint64) ([]byte, *avalancheWarp.UnsignedMessage) { uptimePayload, err := messages.NewValidatorUptime(vID, 80) require.NoError(t, err) - addressedCall, err := payload.NewAddressedCall([]byte{1, 2, 3}, uptimePayload.Bytes()) + addressedCall, err := payload.NewAddressedCall(sourceAddress, uptimePayload.Bytes()) require.NoError(t, err) unsignedMessage, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, addressedCall.Bytes()) 
require.NoError(t, err) @@ -297,10 +297,16 @@ func TestUptimeSignatures(t *testing.T) { require.NoError(t, err) handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner) + // sourceAddress nonZero + protoBytes, _ := getUptimeMessageBytes([]byte{1, 2, 3}, ids.GenerateTestID(), 80) + _, appErr := handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, protoBytes) + require.ErrorIs(t, appErr, &common.AppError{Code: VerifyErrCode}) + require.Contains(t, appErr.Error(), "source address should be empty") + // not existing validationID vID := ids.GenerateTestID() - protoBytes, _ := getUptimeMessageBytes(vID, 80) - _, appErr := handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, protoBytes) + protoBytes, _ = getUptimeMessageBytes([]byte{}, vID, 80) + _, appErr = handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, protoBytes) require.ErrorIs(t, appErr, &common.AppError{Code: VerifyErrCode}) require.Contains(t, appErr.Error(), "failed to get validator") @@ -308,7 +314,7 @@ func TestUptimeSignatures(t *testing.T) { validationID := ids.GenerateTestID() nodeID := ids.GenerateTestNodeID() require.NoError(t, state.AddValidator(validationID, nodeID, clk.Unix(), true)) - protoBytes, _ = getUptimeMessageBytes(validationID, 80) + protoBytes, _ = getUptimeMessageBytes([]byte{}, validationID, 80) _, appErr = handler.AppRequest(context.Background(), nodeID, time.Time{}, protoBytes) require.ErrorIs(t, appErr, &common.AppError{Code: VerifyErrCode}) require.Contains(t, appErr.Error(), "current uptime 0 is less than queried uptime 80") @@ -316,14 +322,14 @@ func TestUptimeSignatures(t *testing.T) { // uptime is less than requested (not enough) require.NoError(t, uptimeManager.Connect(nodeID)) clk.Set(clk.Time().Add(40 * time.Second)) - protoBytes, _ = getUptimeMessageBytes(validationID, 80) + protoBytes, _ = getUptimeMessageBytes([]byte{}, validationID, 80) _, appErr = handler.AppRequest(context.Background(), nodeID, time.Time{}, protoBytes) require.ErrorIs(t, appErr, &common.AppError{Code: VerifyErrCode}) require.Contains(t, appErr.Error(), "current uptime 40 is less than queried uptime 80") // valid uptime clk.Set(clk.Time().Add(40 * time.Second)) - protoBytes, msg := getUptimeMessageBytes(validationID, 80) + protoBytes, msg := getUptimeMessageBytes([]byte{}, validationID, 80) responseBytes, appErr := handler.AppRequest(context.Background(), nodeID, time.Time{}, protoBytes) require.Nil(t, appErr) expectedSignature, err := warpSigner.Sign(msg) From 33a30f0a431cb1913ca645c6cb3a352c3df8f48d Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 5 Nov 2024 13:40:06 +0300 Subject: [PATCH 74/98] nits --- plugin/evm/vm.go | 8 ++++---- scripts/versions.sh | 2 +- utils/snow.go | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 7a1ba133be..b6b2ed97f6 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -267,7 +267,7 @@ type VM struct { ethTxPushGossiper avalancheUtils.Atomic[*gossip.PushGossiper[*GossipEthTx]] ethTxPullGossiper gossip.Gossiper - UptimeLockedCalculator avalancheUptime.LockedCalculator + lockedUptimeCalculator avalancheUptime.LockedCalculator uptimeManager uptime.PausableManager validatorState validators.State @@ -512,8 +512,8 @@ func (vm *VM) Initialize( } vm.uptimeManager = uptime.NewPausableManager(avalancheUptime.NewManager(vm.validatorState, &vm.clock)) - vm.UptimeLockedCalculator = avalancheUptime.NewLockedCalculator() - 
vm.UptimeLockedCalculator.SetCalculator(&vm.bootstrapped, &chainCtx.Lock, vm.uptimeManager) + vm.lockedUptimeCalculator = avalancheUptime.NewLockedCalculator() + vm.lockedUptimeCalculator.SetCalculator(&vm.bootstrapped, &chainCtx.Lock, vm.uptimeManager) vm.validatorState.RegisterListener(vm.uptimeManager) // Initialize warp backend @@ -539,7 +539,7 @@ func (vm *VM) Initialize( vm.ctx.ChainID, vm.ctx.WarpSigner, vm, - vm.UptimeLockedCalculator, + vm.lockedUptimeCalculator, validators.NewLockedStateReader(vm.ctx.Lock.RLocker(), vm.validatorState), vm.warpDB, meteredCache, diff --git a/scripts/versions.sh b/scripts/versions.sh index 24da5f9fbd..248a2002d8 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -4,7 +4,7 @@ # shellcheck disable=SC2034 # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'0b7a136'} +AVALANCHE_VERSION=${AVALANCHE_VERSION:-'0b7a136f'} GINKGO_VERSION=${GINKGO_VERSION:-'v2.2.0'} # This won't be used, but it's here to make code syncs easier diff --git a/utils/snow.go b/utils/snow.go index 7f5f95e9e6..a03678bc9e 100644 --- a/utils/snow.go +++ b/utils/snow.go @@ -31,13 +31,13 @@ func TestSnowContext() *snow.Context { } pk := bls.PublicFromSecretKey(sk) networkID := constants.UnitTestID - chainID := ids.Empty + chainID := ids.GenerateTestID() ctx := &snow.Context{ NetworkID: networkID, SubnetID: ids.Empty, ChainID: chainID, - NodeID: ids.EmptyNodeID, + NodeID: ids.GenerateTestNodeID(), XChainID: testXChainID, CChainID: TestCChainID, NetworkUpgrades: upgradetest.GetConfig(upgradetest.Latest), @@ -76,10 +76,10 @@ func NewTestValidatorState() *validatorstest.State { return subnetID, nil }, GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { - return make(map[ids.NodeID]*validators.GetValidatorOutput), nil + return map[ids.NodeID]*validators.GetValidatorOutput{}, nil }, GetCurrentValidatorSetF: func(context.Context, ids.ID) (map[ids.ID]*validators.GetCurrentValidatorOutput, uint64, error) { - return make(map[ids.ID]*validators.GetCurrentValidatorOutput), 0, nil + return map[ids.ID]*validators.GetCurrentValidatorOutput{}, 0, nil }, } } From 2aebfe657e1ffb467bea07175a0671d53e115501 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 5 Nov 2024 14:02:14 +0300 Subject: [PATCH 75/98] remove log --- plugin/evm/vm.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 892945fac6..df294d73b9 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -1445,8 +1445,6 @@ func (vm *VM) performValidatorUpdate(ctx context.Context) error { return fmt.Errorf("failed to get current validator set: %w", err) } - log.Info("updating validators", "validatorSet", currentValidatorSet) - // load the current validator set into the validator state if err := loadValidators(vm.validatorState, currentValidatorSet); err != nil { return fmt.Errorf("failed to load current validators: %w", err) From 25076c3ec859460263694752849f76825dcf1d07 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 5 Nov 2024 14:17:14 +0300 Subject: [PATCH 76/98] disable validators api by default --- plugin/evm/config.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/plugin/evm/config.go b/plugin/evm/config.go index 41aa8e418c..f911df75ce 100644 --- a/plugin/evm/config.go +++ b/plugin/evm/config.go @@ -61,10 +61,7 @@ const ( // - state sync time: ~6 hrs. 
defaultStateSyncMinBlocks = 300_000 defaultStateSyncRequestSize = 1024 // the number of key/values to ask peers for per request - - defaultValidatorsAPIEnabled = true - - defaultDBType = pebbledb.Name + defaultDBType = pebbledb.Name ) type PBool bool @@ -300,7 +297,6 @@ func (c *Config) SetDefaults() { c.StateSyncRequestSize = defaultStateSyncRequestSize c.AllowUnprotectedTxHashes = defaultAllowUnprotectedTxHashes c.AcceptedCacheSize = defaultAcceptedCacheSize - c.ValidatorsAPIEnabled = defaultValidatorsAPIEnabled c.DatabaseType = defaultDBType } From 9d3a1b3454583d9e2a08d39e2dae80e363f191db Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 5 Nov 2024 18:51:36 +0300 Subject: [PATCH 77/98] fix test context --- plugin/evm/tx_gossip_test.go | 9 --------- plugin/evm/vm_warp_test.go | 4 ++-- utils/snow.go | 13 +++++++------ 3 files changed, 9 insertions(+), 17 deletions(-) diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go index a87a7a3896..0eaaa63e6d 100644 --- a/plugin/evm/tx_gossip_test.go +++ b/plugin/evm/tx_gossip_test.go @@ -20,7 +20,6 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/enginetest" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/snow/validators/validatorstest" agoUtils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" @@ -155,14 +154,6 @@ func TestEthTxPushGossipOutbound(t *testing.T) { require := require.New(t) ctx := context.Background() snowCtx := utils.TestSnowContext() - snowCtx.ValidatorState = &validatorstest.State{ - GetCurrentHeightF: func(context.Context) (uint64, error) { - return 0, nil - }, - GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { - return nil, nil - }, - } sender := &enginetest.SenderStub{ SentAppGossip: make(chan []byte, 1), } diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index 8e207b1e60..2131b79d94 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -481,7 +481,7 @@ func TestReceiveWarpMessage(t *testing.T) { }, { name: "C-Chain message should be signed by subnet without RequirePrimaryNetworkSigners", - sourceChainID: utils.TestCChainID, + sourceChainID: vm.ctx.CChainID, msgFrom: fromPrimary, useSigners: signersSubnet, blockTime: upgrade.InitiallyActiveTime.Add(2 * blockGap), @@ -504,7 +504,7 @@ func TestReceiveWarpMessage(t *testing.T) { }, { name: "C-Chain message should be signed by primary with RequirePrimaryNetworkSigners (impacted)", - sourceChainID: utils.TestCChainID, + sourceChainID: vm.ctx.CChainID, msgFrom: fromPrimary, useSigners: signersPrimary, blockTime: reEnableTime.Add(2 * blockGap), diff --git a/utils/snow.go b/utils/snow.go index a03678bc9e..00901fbad5 100644 --- a/utils/snow.go +++ b/utils/snow.go @@ -20,8 +20,9 @@ import ( ) var ( - TestCChainID = ids.ID{'c', 'c', 'h', 'a', 'i', 'n', 't', 'e', 's', 't'} + testCChainID = ids.ID{'c', 'c', 'h', 'a', 'i', 'n', 't', 'e', 's', 't'} testXChainID = ids.ID{'t', 'e', 's', 't', 'x'} + testChainID = ids.ID{'t', 'e', 's', 't', 'c', 'h', 'a', 'i', 'n'} ) func TestSnowContext() *snow.Context { @@ -31,7 +32,7 @@ func TestSnowContext() *snow.Context { } pk := bls.PublicFromSecretKey(sk) networkID := constants.UnitTestID - chainID := ids.GenerateTestID() + chainID := 
testChainID ctx := &snow.Context{ NetworkID: networkID, @@ -39,7 +40,7 @@ func TestSnowContext() *snow.Context { ChainID: chainID, NodeID: ids.GenerateTestNodeID(), XChainID: testXChainID, - CChainID: TestCChainID, + CChainID: testCChainID, NetworkUpgrades: upgradetest.GetConfig(upgradetest.Latest), PublicKey: pk, WarpSigner: warp.NewSigner(sk, networkID, chainID), @@ -51,8 +52,8 @@ func TestSnowContext() *snow.Context { } aliaser := ctx.BCLookup.(ids.Aliaser) - _ = aliaser.Alias(TestCChainID, "C") - _ = aliaser.Alias(TestCChainID, TestCChainID.String()) + _ = aliaser.Alias(testCChainID, "C") + _ = aliaser.Alias(testCChainID, testCChainID.String()) _ = aliaser.Alias(testXChainID, "X") _ = aliaser.Alias(testXChainID, testXChainID.String()) @@ -68,7 +69,7 @@ func NewTestValidatorState() *validatorstest.State { subnetID, ok := map[ids.ID]ids.ID{ constants.PlatformChainID: constants.PrimaryNetworkID, testXChainID: constants.PrimaryNetworkID, - TestCChainID: constants.PrimaryNetworkID, + testCChainID: constants.PrimaryNetworkID, }[chainID] if !ok { return ids.Empty, errors.New("unknown chain") From 6468d363877db5b0e66706ade83f028da1a32487 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 6 Nov 2024 01:29:41 +0300 Subject: [PATCH 78/98] use interfaces from pkgs --- plugin/evm/validators/interfaces/interface.go | 11 +++++++++++ plugin/evm/validators/state.go | 11 ++--------- plugin/evm/vm.go | 8 +++++--- plugin/evm/vm_validators_state_test.go | 19 ++++++++++--------- 4 files changed, 28 insertions(+), 21 deletions(-) diff --git a/plugin/evm/validators/interfaces/interface.go b/plugin/evm/validators/interfaces/interface.go index 197ab553a1..ab59b948c1 100644 --- a/plugin/evm/validators/interfaces/interface.go +++ b/plugin/evm/validators/interfaces/interface.go @@ -4,6 +4,8 @@ package interfaces import ( + "time" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/utils/set" @@ -27,6 +29,8 @@ type State interface { GetValidationIDs() set.Set[ids.ID] // GetValidatorIDs returns the validator node IDs in the state GetValidatorIDs() set.Set[ids.NodeID] + // GetValidator returns the validator data for the given nodeID + GetValidator(nodeID ids.NodeID) (*ValidatorOutput, error) // RegisterListener registers a listener to the state RegisterListener(StateCallbackListener) @@ -41,3 +45,10 @@ type StateCallbackListener interface { // OnValidatorStatusUpdated is called when a validator status is updated OnValidatorStatusUpdated(vID ids.ID, nodeID ids.NodeID, isActive bool) } + +type ValidatorOutput struct { + ValidationID ids.ID `json:"validationID"` + NodeID ids.NodeID `json:"nodeID"` + StartTime time.Time `json:"startTime"` + IsActive bool `json:"isActive"` +} diff --git a/plugin/evm/validators/state.go b/plugin/evm/validators/state.go index 656654dd1e..52edcd0710 100644 --- a/plugin/evm/validators/state.go +++ b/plugin/evm/validators/state.go @@ -25,13 +25,6 @@ const ( deleted dbUpdateStatus = false ) -type ValidatorOutput struct { - ValidationID ids.ID `json:"validationID"` - NodeID ids.NodeID `json:"nodeID"` - StartTime time.Time `json:"startTime"` - IsActive bool `json:"isActive"` -} - type validatorData struct { UpDuration time.Duration `serialize:"true"` LastUpdated uint64 `serialize:"true"` @@ -217,12 +210,12 @@ func (s *state) GetValidatorIDs() set.Set[ids.NodeID] { } // GetValidator returns the validator data for the given nodeID -func (s *state) GetValidator(nodeID ids.NodeID) 
(*ValidatorOutput, error) { +func (s *state) GetValidator(nodeID ids.NodeID) (*interfaces.ValidatorOutput, error) { data, err := s.getData(nodeID) if err != nil { return nil, err } - return &ValidatorOutput{ + return &interfaces.ValidatorOutput{ ValidationID: data.validationID, NodeID: data.NodeID, StartTime: data.getStartTime(), diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index df294d73b9..de04fa6fe8 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -42,7 +42,9 @@ import ( "github.com/ava-labs/subnet-evm/peer" "github.com/ava-labs/subnet-evm/plugin/evm/message" "github.com/ava-labs/subnet-evm/plugin/evm/uptime" + uptimeinterfaces "github.com/ava-labs/subnet-evm/plugin/evm/uptime/interfaces" "github.com/ava-labs/subnet-evm/plugin/evm/validators" + validatorsinterfaces "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" "github.com/ava-labs/subnet-evm/triedb" "github.com/ava-labs/subnet-evm/triedb/hashdb" @@ -266,8 +268,8 @@ type VM struct { ethTxPushGossiper avalancheUtils.Atomic[*gossip.PushGossiper[*GossipEthTx]] ethTxPullGossiper gossip.Gossiper - uptimeManager uptime.PausableManager - validatorState validators.State + uptimeManager uptimeinterfaces.PausableManager + validatorState validatorsinterfaces.State chainAlias string // RPC handlers (should be stopped before closing chaindb) @@ -1461,7 +1463,7 @@ func (vm *VM) performValidatorUpdate(ctx context.Context) error { } // loadValidators loads the [validators] into the validator state [validatorState] -func loadValidators(validatorState validators.State, validators map[ids.ID]*avalancheValidators.GetCurrentValidatorOutput) error { +func loadValidators(validatorState validatorsinterfaces.State, validators map[ids.ID]*avalancheValidators.GetCurrentValidatorOutput) error { currentValidationIDs := validatorState.GetValidationIDs() // first check if we need to delete any existing validators for vID := range currentValidationIDs { diff --git a/plugin/evm/vm_validators_state_test.go b/plugin/evm/vm_validators_state_test.go index 00d7c9df7c..cec73feeca 100644 --- a/plugin/evm/vm_validators_state_test.go +++ b/plugin/evm/vm_validators_state_test.go @@ -15,6 +15,7 @@ import ( "github.com/ava-labs/avalanchego/snow/validators/validatorstest" "github.com/ava-labs/subnet-evm/core" "github.com/ava-labs/subnet-evm/plugin/evm/validators" + "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" @@ -171,13 +172,13 @@ func TestLoadNewValidators(t *testing.T) { name string initialValidators map[ids.ID]*avagoValidators.GetCurrentValidatorOutput newValidators map[ids.ID]*avagoValidators.GetCurrentValidatorOutput - registerMockListenerCalls func(*validators.MockStateCallbackListener) + registerMockListenerCalls func(*interfaces.MockStateCallbackListener) }{ { name: "before empty/after empty", initialValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{}, newValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{}, - registerMockListenerCalls: func(*validators.MockStateCallbackListener) {}, + registerMockListenerCalls: func(*interfaces.MockStateCallbackListener) {}, }, { name: "before empty/after one", @@ -189,7 +190,7 @@ func TestLoadNewValidators(t *testing.T) { 
StartTime: 0, }, }, - registerMockListenerCalls: func(mock *validators.MockStateCallbackListener) { + registerMockListenerCalls: func(mock *interfaces.MockStateCallbackListener) { mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) }, }, @@ -203,7 +204,7 @@ func TestLoadNewValidators(t *testing.T) { }, }, newValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{}, - registerMockListenerCalls: func(mock *validators.MockStateCallbackListener) { + registerMockListenerCalls: func(mock *interfaces.MockStateCallbackListener) { // initial validator will trigger first mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) // then it will be removed @@ -226,7 +227,7 @@ func TestLoadNewValidators(t *testing.T) { StartTime: 0, }, }, - registerMockListenerCalls: func(mock *validators.MockStateCallbackListener) { + registerMockListenerCalls: func(mock *interfaces.MockStateCallbackListener) { mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) }, }, @@ -251,7 +252,7 @@ func TestLoadNewValidators(t *testing.T) { StartTime: 0, }, }, - registerMockListenerCalls: func(mock *validators.MockStateCallbackListener) { + registerMockListenerCalls: func(mock *interfaces.MockStateCallbackListener) { // initial validator will trigger first mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) // then it will be updated @@ -276,7 +277,7 @@ func TestLoadNewValidators(t *testing.T) { StartTime: 0, }, }, - registerMockListenerCalls: func(mock *validators.MockStateCallbackListener) { + registerMockListenerCalls: func(mock *interfaces.MockStateCallbackListener) { // initial validator will trigger first mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) // then it will be removed @@ -301,7 +302,7 @@ func TestLoadNewValidators(t *testing.T) { StartTime: 0, }, }, - registerMockListenerCalls: func(mock *validators.MockStateCallbackListener) { + registerMockListenerCalls: func(mock *interfaces.MockStateCallbackListener) { // initial validator will trigger first mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) // then it won't be called since we don't track the node ID changes @@ -323,7 +324,7 @@ func TestLoadNewValidators(t *testing.T) { } // enable mock listener ctrl := gomock.NewController(tt) - mockListener := validators.NewMockStateCallbackListener(ctrl) + mockListener := interfaces.NewMockStateCallbackListener(ctrl) test.registerMockListenerCalls(mockListener) validatorState.RegisterListener(mockListener) From 10c4bd4904f2753cbada88824d817b178cbe14a5 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 6 Nov 2024 13:41:54 +0300 Subject: [PATCH 79/98] improve comments --- plugin/evm/vm.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index de04fa6fe8..6d5937f546 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -1337,12 +1337,14 @@ func (vm *VM) initializeDBs(avaDB database.Database) error { vm.db = versiondb.New(db) vm.acceptedBlockDB = prefixdb.New(acceptedPrefix, vm.db) vm.metadataDB = prefixdb.New(metadataPrefix, vm.db) - // Note warpDB is not part of versiondb because it is not necessary - // that warp signatures are committed to the database atomically with + // Note warpDB and validatorsDB are not part of versiondb because it is not necessary + // that they are committed to the 
database atomically with // the last accepted block. // [warpDB] is used to store warp message signatures // set to a prefixDB with the prefix [warpPrefix] vm.warpDB = prefixdb.New(warpPrefix, db) + // [validatorsDB] is used to store the current validator set and uptimes + // set to a prefixDB with the prefix [validatorsDBPrefix] vm.validatorsDB = prefixdb.New(validatorsDBPrefix, db) return nil } From 3912f84bced5ec0071e520e553bbbee76bec27b4 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 6 Nov 2024 18:08:08 +0300 Subject: [PATCH 80/98] Uptime validation nits (#1378) * add uptime warp example * remove log * nit unused interface * add weight and isSov as fields * use validator struct in AddValidator * add comments to example file * fix test * add new fields to tests --------- Signed-off-by: Ceyhun Onur --- examples/sign-uptime-message/main.go | 122 +++++++++++++++++++ plugin/evm/service.go | 6 +- plugin/evm/validators/locked_state_reader.go | 2 +- plugin/evm/validators/noop_state.go | 4 +- plugin/evm/validators/state.go | 47 ++++--- plugin/evm/validators/state_test.go | 80 ++++++++++-- plugin/evm/validators/validator.go | 21 ++++ plugin/evm/vm.go | 17 ++- plugin/evm/vm_validators_state_test.go | 9 +- warp/messages/payload.go | 6 - warp/verifier_backend_test.go | 9 +- 11 files changed, 271 insertions(+), 52 deletions(-) create mode 100644 examples/sign-uptime-message/main.go create mode 100644 plugin/evm/validators/validator.go diff --git a/examples/sign-uptime-message/main.go b/examples/sign-uptime-message/main.go new file mode 100644 index 0000000000..01d4172195 --- /dev/null +++ b/examples/sign-uptime-message/main.go @@ -0,0 +1,122 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package main + +import ( + "context" + "log" + "net/netip" + "time" + + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/protobuf/proto" + + "github.com/ava-labs/avalanchego/api/info" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/peer" + "github.com/ava-labs/avalanchego/proto/pb/sdk" + "github.com/ava-labs/avalanchego/snow/networking/router" + "github.com/ava-labs/avalanchego/utils/compression" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" + "github.com/ava-labs/subnet-evm/warp/messages" + + p2pmessage "github.com/ava-labs/avalanchego/message" +) + +func main() { + uri := primary.LocalAPIURI + // The following IDs are placeholders and should be replaced with real values + // before running the code. + // The validationID is for the validation period that the uptime message is signed for. + validationID := ids.FromStringOrPanic("p3NUAY4PbcAnyCyvUTjGVjezNEQCdnVdfAbJcZScvKpxP5tJr") + // The sourceChainID is the ID of the chain. 
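+ // It is used below both as the source chain of the unsigned Warp message and as the chain ID the AppRequest is sent to, i.e. the Subnet-EVM chain whose Warp backend is asked to sign the uptime message.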
+ sourceChainID := ids.FromStringOrPanic("2UZWB4xjNadRcHSpXarQoCryiVdcGWoT5w1dUztNfMKkAd2hJX") + reqUptime := uint64(3486) + infoClient := info.NewClient(uri) + networkID, err := infoClient.GetNetworkID(context.Background()) + if err != nil { + log.Fatalf("failed to fetch network ID: %s\n", err) + } + + validatorUptime, err := messages.NewValidatorUptime(validationID, reqUptime) + if err != nil { + log.Fatalf("failed to create validatorUptime message: %s\n", err) + } + + addressedCall, err := payload.NewAddressedCall( + nil, + validatorUptime.Bytes(), + ) + if err != nil { + log.Fatalf("failed to create AddressedCall message: %s\n", err) + } + + unsignedWarp, err := warp.NewUnsignedMessage( + networkID, + sourceChainID, + addressedCall.Bytes(), + ) + if err != nil { + log.Fatalf("failed to create unsigned Warp message: %s\n", err) + } + + p, err := peer.StartTestPeer( + context.Background(), + netip.AddrPortFrom( + netip.AddrFrom4([4]byte{127, 0, 0, 1}), + 9651, + ), + networkID, + router.InboundHandlerFunc(func(_ context.Context, msg p2pmessage.InboundMessage) { + log.Printf("received %s: %s", msg.Op(), msg.Message()) + }), + ) + if err != nil { + log.Fatalf("failed to start peer: %s\n", err) + } + + messageBuilder, err := p2pmessage.NewCreator( + logging.NoLog{}, + prometheus.NewRegistry(), + compression.TypeZstd, + time.Hour, + ) + if err != nil { + log.Fatalf("failed to create message builder: %s\n", err) + } + + appRequestPayload, err := proto.Marshal(&sdk.SignatureRequest{ + Message: unsignedWarp.Bytes(), + }) + if err != nil { + log.Fatalf("failed to marshal SignatureRequest: %s\n", err) + } + + appRequest, err := messageBuilder.AppRequest( + sourceChainID, + 0, + time.Hour, + p2p.PrefixMessage( + p2p.ProtocolPrefix(p2p.SignatureRequestHandlerID), + appRequestPayload, + ), + ) + if err != nil { + log.Fatalf("failed to create AppRequest: %s\n", err) + } + + p.Send(context.Background(), appRequest) + + time.Sleep(5 * time.Second) + + p.StartClose() + err = p.AwaitClosed(context.Background()) + if err != nil { + log.Fatalf("failed to close peer: %s\n", err) + } +} diff --git a/plugin/evm/service.go b/plugin/evm/service.go index f434b09ca3..7045994bfa 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -27,8 +27,10 @@ type GetCurrentValidatorsResponse struct { type CurrentValidator struct { ValidationID ids.ID `json:"validationID"` NodeID ids.NodeID `json:"nodeID"` + Weight uint64 `json:"weight"` StartTime time.Time `json:"startTime"` IsActive bool `json:"isActive"` + IsSoV bool `json:"isSoV"` IsConnected bool `json:"isConnected"` Uptime time.Duration `json:"uptime"` } @@ -63,8 +65,10 @@ func (api *ValidatorsAPI) GetCurrentValidators(_ *http.Request, args *GetCurrent reply.Validators = append(reply.Validators, CurrentValidator{ ValidationID: validator.ValidationID, NodeID: nodeID, - StartTime: validator.StartTime, + StartTime: validator.StartTime(), + Weight: validator.Weight, IsActive: validator.IsActive, + IsSoV: validator.IsSoV, IsConnected: isConnected, Uptime: time.Duration(uptime.Seconds()), }) diff --git a/plugin/evm/validators/locked_state_reader.go b/plugin/evm/validators/locked_state_reader.go index 022c7e9507..6746326af9 100644 --- a/plugin/evm/validators/locked_state_reader.go +++ b/plugin/evm/validators/locked_state_reader.go @@ -40,7 +40,7 @@ func (s *lockedStateReader) GetNodeIDs() set.Set[ids.NodeID] { return s.s.GetNodeIDs() } -func (s *lockedStateReader) GetValidator(nodeID ids.NodeID) (*ValidatorOutput, error) { +func (s *lockedStateReader) 
GetValidator(nodeID ids.NodeID) (*Validator, error) { s.lock.Lock() defer s.lock.Unlock() diff --git a/plugin/evm/validators/noop_state.go b/plugin/evm/validators/noop_state.go index 822f9be4a0..35356d4188 100644 --- a/plugin/evm/validators/noop_state.go +++ b/plugin/evm/validators/noop_state.go @@ -17,7 +17,7 @@ func (n *noOpState) GetValidationIDs() set.Set[ids.ID] { return set.NewSet[ids.I func (n *noOpState) GetNodeIDs() set.Set[ids.NodeID] { return set.NewSet[ids.NodeID](0) } -func (n *noOpState) GetValidator(nodeID ids.NodeID) (*ValidatorOutput, error) { +func (n *noOpState) GetValidator(nodeID ids.NodeID) (*Validator, error) { return nil, nil } @@ -45,7 +45,7 @@ func (n *noOpState) GetStartTime( return time.Time{}, nil } -func (n *noOpState) AddValidator(vID ids.ID, nodeID ids.NodeID, startTimestamp uint64, isActive bool) error { +func (n *noOpState) AddValidator(vdr Validator) error { return nil } diff --git a/plugin/evm/validators/state.go b/plugin/evm/validators/state.go index 135a4b7f7d..606d43b1a4 100644 --- a/plugin/evm/validators/state.go +++ b/plugin/evm/validators/state.go @@ -32,7 +32,7 @@ type StateReader interface { // GetNodeIDs returns the validator node IDs in the state GetNodeIDs() set.Set[ids.NodeID] // GetValidator returns the validator data for the given nodeID - GetValidator(nodeID ids.NodeID) (*ValidatorOutput, error) + GetValidator(nodeID ids.NodeID) (*Validator, error) // GetNodeID returns the node ID for the given validation ID GetNodeID(vID ids.ID) (ids.NodeID, error) } @@ -41,7 +41,7 @@ type State interface { uptime.State StateReader // AddValidator adds a new validator to the state - AddValidator(vID ids.ID, nodeID ids.NodeID, startTimestamp uint64, isActive bool) error + AddValidator(vdr Validator) error // DeleteValidator deletes the validator from the state DeleteValidator(vID ids.ID) error // WriteState writes the validator state to the disk @@ -64,19 +64,14 @@ type StateCallbackListener interface { OnValidatorStatusUpdated(vID ids.ID, nodeID ids.NodeID, isActive bool) } -type ValidatorOutput struct { - ValidationID ids.ID `json:"validationID"` - NodeID ids.NodeID `json:"nodeID"` - StartTime time.Time `json:"startTime"` - IsActive bool `json:"isActive"` -} - type validatorData struct { UpDuration time.Duration `serialize:"true"` LastUpdated uint64 `serialize:"true"` NodeID ids.NodeID `serialize:"true"` + Weight uint64 `serialize:"true"` StartTime uint64 `serialize:"true"` IsActive bool `serialize:"true"` + IsSoV bool `serialize:"true"` validationID ids.ID // database key } @@ -144,23 +139,25 @@ func (s *state) GetStartTime(nodeID ids.NodeID) (time.Time, error) { // AddValidator adds a new validator to the state // the new validator is marked as updated and will be written to the disk when WriteState is called -func (s *state) AddValidator(vID ids.ID, nodeID ids.NodeID, startTimestamp uint64, isActive bool) error { +func (s *state) AddValidator(vdr Validator) error { data := &validatorData{ - NodeID: nodeID, - validationID: vID, - IsActive: isActive, - StartTime: startTimestamp, + NodeID: vdr.NodeID, + validationID: vdr.ValidationID, + IsActive: vdr.IsActive, + StartTime: vdr.StartTimestamp, UpDuration: 0, - LastUpdated: startTimestamp, + LastUpdated: vdr.StartTimestamp, + IsSoV: vdr.IsSoV, + Weight: vdr.Weight, } - if err := s.addData(vID, data); err != nil { + if err := s.addData(vdr.ValidationID, data); err != nil { return err } - s.updatedData[vID] = updated + s.updatedData[vdr.ValidationID] = updated for _, listener := range s.listeners { - 
listener.OnValidatorAdded(vID, nodeID, startTimestamp, isActive) + listener.OnValidatorAdded(vdr.ValidationID, vdr.NodeID, vdr.StartTimestamp, vdr.IsActive) } return nil } @@ -256,16 +253,18 @@ func (s *state) GetNodeIDs() set.Set[ids.NodeID] { } // GetValidator returns the validator data for the given nodeID -func (s *state) GetValidator(nodeID ids.NodeID) (*ValidatorOutput, error) { +func (s *state) GetValidator(nodeID ids.NodeID) (*Validator, error) { data, err := s.getData(nodeID) if err != nil { return nil, err } - return &ValidatorOutput{ - ValidationID: data.validationID, - NodeID: data.NodeID, - StartTime: data.getStartTime(), - IsActive: data.IsActive, + return &Validator{ + ValidationID: data.validationID, + NodeID: data.NodeID, + StartTimestamp: data.StartTime, + IsActive: data.IsActive, + Weight: data.Weight, + IsSoV: data.IsSoV, }, nil } diff --git a/plugin/evm/validators/state_test.go b/plugin/evm/validators/state_test.go index ecfd7d34a9..432ac29438 100644 --- a/plugin/evm/validators/state_test.go +++ b/plugin/evm/validators/state_test.go @@ -35,13 +35,34 @@ func TestState(t *testing.T) { require.ErrorIs(err, database.ErrNotFound) // add new validator - state.AddValidator(vID, nodeID, uint64(startTime.Unix()), true) + state.AddValidator(Validator{ + ValidationID: vID, + NodeID: nodeID, + Weight: 1, + StartTimestamp: uint64(startTime.Unix()), + IsActive: true, + IsSoV: true, + }) // adding the same validator should fail - err = state.AddValidator(vID, ids.GenerateTestNodeID(), uint64(startTime.Unix()), true) + err = state.AddValidator(Validator{ + ValidationID: vID, + NodeID: ids.GenerateTestNodeID(), + Weight: 1, + StartTimestamp: uint64(startTime.Unix()), + IsActive: true, + IsSoV: true, + }) require.ErrorIs(err, ErrAlreadyExists) // adding the same nodeID should fail - err = state.AddValidator(ids.GenerateTestID(), nodeID, uint64(startTime.Unix()), true) + err = state.AddValidator(Validator{ + ValidationID: ids.GenerateTestID(), + NodeID: nodeID, + Weight: 1, + StartTimestamp: uint64(startTime.Unix()), + IsActive: true, + IsSoV: true, + }) require.ErrorIs(err, ErrAlreadyExists) // get uptime @@ -87,7 +108,14 @@ func TestWriteValidator(t *testing.T) { nodeID := ids.GenerateTestNodeID() vID := ids.GenerateTestID() startTime := time.Now() - require.NoError(state.AddValidator(vID, nodeID, uint64(startTime.Unix()), true)) + require.NoError(state.AddValidator(Validator{ + ValidationID: vID, + NodeID: nodeID, + Weight: 1, + StartTimestamp: uint64(startTime.Unix()), + IsActive: true, + IsSoV: true, + })) // write state, should reflect to DB require.NoError(state.WriteState()) @@ -131,8 +159,14 @@ func TestParseValidator(t *testing.T) { name: "nil", bytes: nil, expected: &validatorData{ - LastUpdated: 0, - StartTime: 0, + LastUpdated: 0, + StartTime: 0, + validationID: ids.Empty, + NodeID: ids.EmptyNodeID, + UpDuration: 0, + Weight: 0, + IsActive: false, + IsSoV: false, }, expectedErr: nil, }, @@ -140,8 +174,14 @@ func TestParseValidator(t *testing.T) { name: "empty", bytes: []byte{}, expected: &validatorData{ - LastUpdated: 0, - StartTime: 0, + LastUpdated: 0, + StartTime: 0, + validationID: ids.Empty, + NodeID: ids.EmptyNodeID, + UpDuration: 0, + Weight: 0, + IsActive: false, + IsSoV: false, }, expectedErr: nil, }, @@ -158,10 +198,14 @@ func TestParseValidator(t *testing.T) { 0x7e, 0xef, 0xe8, 0x8a, 0x45, 0xfb, 0x7a, 0xc4, 0xb0, 0x59, 0xc9, 0x33, 0x71, 0x0a, 0x57, 0x33, 0xff, 0x9f, 0x4b, 0xab, + // weight + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // start time 0x00, 0x00, 
0x00, 0x00, 0x00, 0x5B, 0x8D, 0x80, // status 0x01, + // is SoV + 0x01, }, expected: &validatorData{ UpDuration: time.Duration(6000000), @@ -169,6 +213,8 @@ func TestParseValidator(t *testing.T) { NodeID: testNodeID, StartTime: 6000000, IsActive: true, + Weight: 1, + IsSoV: true, }, }, { @@ -230,7 +276,14 @@ func TestStateListener(t *testing.T) { initialStartTime := time.Now() // add initial validator - require.NoError(state.AddValidator(initialvID, initialNodeID, uint64(initialStartTime.Unix()), true)) + require.NoError(state.AddValidator(Validator{ + ValidationID: initialvID, + NodeID: initialNodeID, + Weight: 1, + StartTimestamp: uint64(initialStartTime.Unix()), + IsActive: true, + IsSoV: true, + })) // register listener mockListener.EXPECT().OnValidatorAdded(initialvID, initialNodeID, uint64(initialStartTime.Unix()), true) @@ -238,7 +291,14 @@ func TestStateListener(t *testing.T) { // add new validator mockListener.EXPECT().OnValidatorAdded(expectedvID, expectedNodeID, uint64(expectedStartTime.Unix()), true) - require.NoError(state.AddValidator(expectedvID, expectedNodeID, uint64(expectedStartTime.Unix()), true)) + require.NoError(state.AddValidator(Validator{ + ValidationID: expectedvID, + NodeID: expectedNodeID, + Weight: 1, + StartTimestamp: uint64(expectedStartTime.Unix()), + IsActive: true, + IsSoV: true, + })) // set status mockListener.EXPECT().OnValidatorStatusUpdated(expectedvID, expectedNodeID, false) diff --git a/plugin/evm/validators/validator.go b/plugin/evm/validators/validator.go new file mode 100644 index 0000000000..a5d81fff25 --- /dev/null +++ b/plugin/evm/validators/validator.go @@ -0,0 +1,21 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package validators + +import ( + "time" + + ids "github.com/ava-labs/avalanchego/ids" +) + +type Validator struct { + ValidationID ids.ID `json:"validationID"` + NodeID ids.NodeID `json:"nodeID"` + Weight uint64 `json:"weight"` + StartTimestamp uint64 `json:"startTimestamp"` + IsActive bool `json:"isActive"` + IsSoV bool `json:"isSoV"` +} + +func (v *Validator) StartTime() time.Time { return time.Unix(int64(v.StartTimestamp), 0) } diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index b6b2ed97f6..c473be17e0 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -1461,8 +1461,6 @@ func (vm *VM) performValidatorUpdate(ctx context.Context) error { return fmt.Errorf("failed to get current validator set: %w", err) } - log.Info("updating validators", "validatorSet", currentValidatorSet) - // load the current validator set into the validator state if err := loadValidators(vm.validatorState, currentValidatorSet); err != nil { return fmt.Errorf("failed to load current validators: %w", err) @@ -1479,19 +1477,19 @@ func (vm *VM) performValidatorUpdate(ctx context.Context) error { } // loadValidators loads the [validators] into the validator state [validatorState] -func loadValidators(validatorState validators.State, validators map[ids.ID]*avalancheValidators.GetCurrentValidatorOutput) error { +func loadValidators(validatorState validators.State, vdrs map[ids.ID]*avalancheValidators.GetCurrentValidatorOutput) error { currentValidationIDs := validatorState.GetValidationIDs() // first check if we need to delete any existing validators for vID := range currentValidationIDs { // if the validator is not in the new set of validators // delete the validator - if _, exists := validators[vID]; !exists { + if _, exists := vdrs[vID]; !exists { 
validatorState.DeleteValidator(vID) } } // then load the new validators - for vID, vdr := range validators { + for vID, vdr := range vdrs { if currentValidationIDs.Contains(vID) { // Check if IsActive has changed isActive, err := validatorState.GetStatus(vID) @@ -1504,7 +1502,14 @@ func loadValidators(validatorState validators.State, validators map[ids.ID]*aval } } } else { - if err := validatorState.AddValidator(vID, vdr.NodeID, vdr.StartTime, vdr.IsActive); err != nil { + if err := validatorState.AddValidator(validators.Validator{ + ValidationID: vID, + NodeID: vdr.NodeID, + Weight: vdr.Weight, + StartTimestamp: vdr.StartTime, + IsActive: vdr.IsActive, + IsSoV: vdr.IsSoV, + }); err != nil { return err } } diff --git a/plugin/evm/vm_validators_state_test.go b/plugin/evm/vm_validators_state_test.go index f850b58a28..669003ba7a 100644 --- a/plugin/evm/vm_validators_state_test.go +++ b/plugin/evm/vm_validators_state_test.go @@ -319,7 +319,14 @@ func TestLoadNewValidators(t *testing.T) { // set initial validators for vID, validator := range test.initialValidators { - err := validatorState.AddValidator(vID, validator.NodeID, validator.StartTime, validator.IsActive) + err := validatorState.AddValidator(validators.Validator{ + ValidationID: vID, + NodeID: validator.NodeID, + Weight: validator.Weight, + StartTimestamp: validator.StartTime, + IsActive: validator.IsActive, + IsSoV: validator.IsSoV, + }) require.NoError(err) } // enable mock listener diff --git a/warp/messages/payload.go b/warp/messages/payload.go index 3776a1356d..facf54524d 100644 --- a/warp/messages/payload.go +++ b/warp/messages/payload.go @@ -20,12 +20,6 @@ type Payload interface { initialize(b []byte) } -// Signable is an optional interface that payloads can implement to allow -// on-the-fly signing of incoming messages by the warp backend. 
-type Signable interface { - VerifyMesssage(sourceAddress []byte) error -} - func Parse(bytes []byte) (Payload, error) { var payload Payload if _, err := Codec.Unmarshal(bytes, &payload); err != nil { diff --git a/warp/verifier_backend_test.go b/warp/verifier_backend_test.go index 952bf8c3f5..a6859a41c8 100644 --- a/warp/verifier_backend_test.go +++ b/warp/verifier_backend_test.go @@ -313,7 +313,14 @@ func TestUptimeSignatures(t *testing.T) { // uptime is less than requested (not connected) validationID := ids.GenerateTestID() nodeID := ids.GenerateTestNodeID() - require.NoError(t, state.AddValidator(validationID, nodeID, clk.Unix(), true)) + require.NoError(t, state.AddValidator(validators.Validator{ + ValidationID: validationID, + NodeID: nodeID, + Weight: 1, + StartTimestamp: clk.Unix(), + IsActive: true, + IsSoV: true, + })) protoBytes, _ = getUptimeMessageBytes([]byte{}, validationID, 80) _, appErr = handler.AppRequest(context.Background(), nodeID, time.Time{}, protoBytes) require.ErrorIs(t, appErr, &common.AppError{Code: VerifyErrCode}) From 2e7f70eaaa993e4a47e1a5c3431352eb0052f004 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Wed, 6 Nov 2024 18:59:53 +0300 Subject: [PATCH 81/98] Update plugin/evm/validators/state.go Co-authored-by: Michael Kaplan <55204436+michaelkaplan13@users.noreply.github.com> Signed-off-by: Ceyhun Onur --- plugin/evm/validators/state.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/validators/state.go b/plugin/evm/validators/state.go index 606d43b1a4..ec8e240afb 100644 --- a/plugin/evm/validators/state.go +++ b/plugin/evm/validators/state.go @@ -243,7 +243,7 @@ func (s *state) GetValidationIDs() set.Set[ids.ID] { return ids } -// GetNodeIDs returns the validator IDs in the state +// GetNodeIDs returns the node IDs of validators in the state func (s *state) GetNodeIDs() set.Set[ids.NodeID] { ids := set.NewSet[ids.NodeID](len(s.index)) for nodeID := range s.index { From 3b691739d13391b02250cc95eebd243753d3e219 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 7 Nov 2024 01:15:29 +0300 Subject: [PATCH 82/98] pass locker --- plugin/evm/validators/locked_state_reader.go | 55 -------------------- plugin/evm/vm.go | 5 +- warp/backend.go | 4 ++ warp/backend_test.go | 11 ++-- warp/handlers/signature_request_test.go | 3 +- warp/verifier_backend.go | 2 + warp/verifier_backend_test.go | 5 +- 7 files changed, 20 insertions(+), 65 deletions(-) delete mode 100644 plugin/evm/validators/locked_state_reader.go diff --git a/plugin/evm/validators/locked_state_reader.go b/plugin/evm/validators/locked_state_reader.go deleted file mode 100644 index 6746326af9..0000000000 --- a/plugin/evm/validators/locked_state_reader.go +++ /dev/null @@ -1,55 +0,0 @@ -package validators - -import ( - "sync" - - ids "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" -) - -type lockedStateReader struct { - lock sync.Locker - s StateReader -} - -func NewLockedStateReader(lock sync.Locker, s State) StateReader { - return &lockedStateReader{ - lock: lock, - s: s, - } -} - -func (s *lockedStateReader) GetStatus(vID ids.ID) (bool, error) { - s.lock.Lock() - defer s.lock.Unlock() - - return s.s.GetStatus(vID) -} - -func (s *lockedStateReader) GetValidationIDs() set.Set[ids.ID] { - s.lock.Lock() - defer s.lock.Unlock() - - return s.s.GetValidationIDs() -} - -func (s *lockedStateReader) GetNodeIDs() set.Set[ids.NodeID] { - s.lock.Lock() - defer s.lock.Unlock() - - return s.s.GetNodeIDs() -} - -func (s 
*lockedStateReader) GetValidator(nodeID ids.NodeID) (*Validator, error) { - s.lock.Lock() - defer s.lock.Unlock() - - return s.s.GetValidator(nodeID) -} - -func (s *lockedStateReader) GetNodeID(vID ids.ID) (ids.NodeID, error) { - s.lock.Lock() - defer s.lock.Unlock() - - return s.s.GetNodeID(vID) -} diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index c473be17e0..cc26519c85 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -539,8 +539,9 @@ func (vm *VM) Initialize( vm.ctx.ChainID, vm.ctx.WarpSigner, vm, - vm.lockedUptimeCalculator, - validators.NewLockedStateReader(vm.ctx.Lock.RLocker(), vm.validatorState), + vm.uptimeManager, + vm.validatorState, + vm.ctx.Lock.RLocker(), vm.warpDB, meteredCache, offchainWarpMessages, diff --git a/warp/backend.go b/warp/backend.go index 91c4b5f866..7a9b60e433 100644 --- a/warp/backend.go +++ b/warp/backend.go @@ -7,6 +7,7 @@ import ( "context" "errors" "fmt" + "sync" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database" @@ -60,6 +61,7 @@ type backend struct { blockClient BlockClient uptimeCalculator uptime.Calculator validatorState validators.StateReader + stateLock sync.Locker signatureCache cache.Cacher[ids.ID, []byte] messageCache *cache.LRU[ids.ID, *avalancheWarp.UnsignedMessage] offchainAddressedCallMsgs map[ids.ID]*avalancheWarp.UnsignedMessage @@ -74,6 +76,7 @@ func NewBackend( blockClient BlockClient, uptimeCalculator uptime.Calculator, validatorsState validators.StateReader, + stateLock sync.Locker, db database.Database, signatureCache cache.Cacher[ids.ID, []byte], offchainMessages [][]byte, @@ -87,6 +90,7 @@ func NewBackend( signatureCache: signatureCache, uptimeCalculator: uptimeCalculator, validatorState: validatorsState, + stateLock: stateLock, messageCache: &cache.LRU[ids.ID, *avalancheWarp.UnsignedMessage]{Size: messageCacheSize}, stats: newVerifierStats(), offchainAddressedCallMsgs: make(map[ids.ID]*avalancheWarp.UnsignedMessage), diff --git a/warp/backend_test.go b/warp/backend_test.go index 842dc116c2..eeb6f06115 100644 --- a/warp/backend_test.go +++ b/warp/backend_test.go @@ -5,6 +5,7 @@ package warp import ( "context" + "sync" "testing" "github.com/ava-labs/avalanchego/cache" @@ -46,7 +47,7 @@ func TestAddAndGetValidMessage(t *testing.T) { require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, uptime.NoOpCalculator, validators.NoOpState, db, messageSignatureCache, nil) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, uptime.NoOpCalculator, validators.NoOpState, &sync.RWMutex{}, db, messageSignatureCache, nil) require.NoError(t, err) // Add testUnsignedMessage to the warp backend @@ -69,7 +70,7 @@ func TestAddAndGetUnknownMessage(t *testing.T) { require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, uptime.NoOpCalculator, validators.NoOpState, db, messageSignatureCache, nil) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, uptime.NoOpCalculator, validators.NoOpState, &sync.RWMutex{}, db, messageSignatureCache, nil) require.NoError(t, err) // Try getting a signature for a message that was not added. 
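Each NewBackend call site in this test file now passes a sync.Locker between the validator state and the database, replacing the deleted lockedStateReader wrapper. A rough sketch of the updated call shape (not taken from the patch; it assumes the test file's existing imports plus the newly added "sync", and reuses the networkID, sourceChainID and warpSigner fixtures set up earlier in each test):

    // Construct the backend with the new stateLock argument; the lock is
    // assumed to guard the validator state shared with the VM.
    db := memdb.New()
    sigCache := &cache.LRU[ids.ID, []byte]{Size: 500}
    backend, err := NewBackend(
        networkID,
        sourceChainID,
        warpSigner,
        nil,                   // BlockClient, unused in this sketch
        uptime.NoOpCalculator, // uptime.Calculator
        validators.NoOpState,  // validators.StateReader
        &sync.RWMutex{},       // new: sync.Locker guarding the validator state
        db,
        sigCache,
        nil, // no off-chain messages
    )
    require.NoError(t, err)
    _ = backend

In production the VM passes vm.ctx.Lock.RLocker() here (see the plugin/evm/vm.go hunk above), so the backend acquires the context read lock before touching the shared validator state.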
@@ -88,7 +89,7 @@ func TestGetBlockSignature(t *testing.T) { require.NoError(err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, blockClient, uptime.NoOpCalculator, validators.NoOpState, db, messageSignatureCache, nil) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, blockClient, uptime.NoOpCalculator, validators.NoOpState, &sync.RWMutex{}, db, messageSignatureCache, nil) require.NoError(err) blockHashPayload, err := payload.NewHash(blkID) @@ -115,7 +116,7 @@ func TestZeroSizedCache(t *testing.T) { // Verify zero sized cache works normally, because the lru cache will be initialized to size 1 for any size parameter <= 0. messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 0} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, uptime.NoOpCalculator, validators.NoOpState, db, messageSignatureCache, nil) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, uptime.NoOpCalculator, validators.NoOpState, &sync.RWMutex{}, db, messageSignatureCache, nil) require.NoError(t, err) // Add testUnsignedMessage to the warp backend @@ -169,7 +170,7 @@ func TestOffChainMessages(t *testing.T) { db := memdb.New() messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 0} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, uptime.NoOpCalculator, validators.NoOpState, db, messageSignatureCache, test.offchainMessages) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, uptime.NoOpCalculator, validators.NoOpState, &sync.RWMutex{}, db, messageSignatureCache, test.offchainMessages) require.ErrorIs(err, test.err) if test.check != nil { test.check(require, backend) diff --git a/warp/handlers/signature_request_test.go b/warp/handlers/signature_request_test.go index d44e09b4a1..a346d1b0ed 100644 --- a/warp/handlers/signature_request_test.go +++ b/warp/handlers/signature_request_test.go @@ -35,7 +35,7 @@ func TestMessageSignatureHandler(t *testing.T) { require.NoError(t, err) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 100} - backend, err := warp.NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, uptime.NoOpCalculator, validators.NoOpState, database, messageSignatureCache, [][]byte{offchainMessage.Bytes()}) + backend, err := warp.NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, uptime.NoOpCalculator, validators.NoOpState, snowCtx.Lock.RLocker(), database, messageSignatureCache, [][]byte{offchainMessage.Bytes()}) require.NoError(t, err) msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, []byte("test")) @@ -143,6 +143,7 @@ func TestBlockSignatureHandler(t *testing.T) { blockClient, uptime.NoOpCalculator, validators.NoOpState, + snowCtx.Lock.RLocker(), database, messageSignatureCache, nil, diff --git a/warp/verifier_backend.go b/warp/verifier_backend.go index 97ae9d93d9..966211c7a0 100644 --- a/warp/verifier_backend.go +++ b/warp/verifier_backend.go @@ -104,6 +104,8 @@ func (b *backend) verifyUptimeMessage(sourceAddress []byte, uptimeMsg *messages. 
} } + b.stateLock.Lock() + defer b.stateLock.Unlock() // first get the validator's nodeID nodeID, err := b.validatorState.GetNodeID(uptimeMsg.ValidationID) if err != nil { diff --git a/warp/verifier_backend_test.go b/warp/verifier_backend_test.go index a6859a41c8..db5e0d9952 100644 --- a/warp/verifier_backend_test.go +++ b/warp/verifier_backend_test.go @@ -102,7 +102,7 @@ func TestAddressedCallSignatures(t *testing.T) { } else { sigCache = &cache.Empty[ids.ID, []byte]{} } - warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, uptime.NoOpCalculator, validators.NoOpState, database, sigCache, [][]byte{offchainMessage.Bytes()}) + warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, uptime.NoOpCalculator, validators.NoOpState, snowCtx.Lock.RLocker(), database, sigCache, [][]byte{offchainMessage.Bytes()}) require.NoError(t, err) handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner) @@ -219,6 +219,7 @@ func TestBlockSignatures(t *testing.T) { blockClient, uptime.NoOpCalculator, validators.NoOpState, + snowCtx.Lock.RLocker(), database, sigCache, nil, @@ -293,7 +294,7 @@ func TestUptimeSignatures(t *testing.T) { clk := &mockable.Clock{} uptimeManager := uptime.NewManager(state, clk) uptimeManager.StartTracking([]ids.NodeID{}) - warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, uptimeManager, state, database, sigCache, nil) + warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, uptimeManager, state, snowCtx.Lock.RLocker(), database, sigCache, nil) require.NoError(t, err) handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner) From 8c7f8d3d7cc2bd50c7118bc7865530fd03ec10b8 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Thu, 7 Nov 2024 15:41:56 +0300 Subject: [PATCH 83/98] rename addresscall verifier fn --- warp/handlers/signature_request_test.go | 3 +-- warp/verifier_backend.go | 24 ++++++++++++------------ 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/warp/handlers/signature_request_test.go b/warp/handlers/signature_request_test.go index 7430eb0b0b..d27bd1a295 100644 --- a/warp/handlers/signature_request_test.go +++ b/warp/handlers/signature_request_test.go @@ -15,7 +15,6 @@ import ( avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/subnet-evm/plugin/evm/message" - "github.com/ava-labs/subnet-evm/plugin/evm/validators" "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" "github.com/ava-labs/subnet-evm/utils" "github.com/ava-labs/subnet-evm/warp" @@ -143,7 +142,7 @@ func TestBlockSignatureHandler(t *testing.T) { warpSigner, blockClient, uptime.NoOpCalculator, - validators.NoOpState, + interfaces.NoOpState, snowCtx.Lock.RLocker(), database, messageSignatureCache, diff --git a/warp/verifier_backend.go b/warp/verifier_backend.go index 966211c7a0..12fd16483d 100644 --- a/warp/verifier_backend.go +++ b/warp/verifier_backend.go @@ -39,7 +39,7 @@ func (b *backend) Verify(ctx context.Context, unsignedMessage *avalancheWarp.Uns switch p := parsed.(type) { case *payload.AddressedCall: - return b.verifyAddressedCall(p) + return b.verifyOffchainAddressCall(p) case *payload.Hash: return b.verifyBlockMessage(ctx, p) default: 
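The hunk that follows lifts the empty-source-address requirement out of verifyUptimeMessage and applies it to every off-chain addressed call before the payload type switch runs. As a hedged illustration (not part of the patch), a message that passes this check would be assembled roughly as below, where uptimePayloadBytes is assumed to hold a serialized messages.ValidatorUptime and networkID/sourceChainID come from the chain context:

    // Off-chain addressed calls must carry an empty source address; anything
    // else is now rejected with VerifyErrCode regardless of payload type.
    addressedCall, err := payload.NewAddressedCall([]byte{}, uptimePayloadBytes)
    if err != nil {
        return err
    }
    unsignedMsg, err := avalancheWarp.NewUnsignedMessage(networkID, sourceChainID, addressedCall.Bytes())
    if err != nil {
        return err
    }
    // unsignedMsg can now be handed to the verifier/signer path.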
@@ -67,8 +67,8 @@ func (b *backend) verifyBlockMessage(ctx context.Context, blockHashPayload *payl return nil } -// verifyAddressedCall verifies the addressed call message -func (b *backend) verifyAddressedCall(addressedCall *payload.AddressedCall) *common.AppError { +// verifyOffchainAddressCall verifies the addressed call message +func (b *backend) verifyOffchainAddressCall(addressedCall *payload.AddressedCall) *common.AppError { // Further, parse the payload to see if it is a known type. parsed, err := messages.Parse(addressedCall.Payload) if err != nil { @@ -79,9 +79,16 @@ func (b *backend) verifyAddressedCall(addressedCall *payload.AddressedCall) *com } } + if len(addressedCall.SourceAddress) != 0 { + return &common.AppError{ + Code: VerifyErrCode, + Message: "source address should be empty for offchain addressed messages", + } + } + switch p := parsed.(type) { case *messages.ValidatorUptime: - if err := b.verifyUptimeMessage(addressedCall.SourceAddress, p); err != nil { + if err := b.verifyUptimeMessage(p); err != nil { b.stats.IncUptimeValidationFail() return err } @@ -96,14 +103,7 @@ func (b *backend) verifyAddressedCall(addressedCall *payload.AddressedCall) *com return nil } -func (b *backend) verifyUptimeMessage(sourceAddress []byte, uptimeMsg *messages.ValidatorUptime) *common.AppError { - if len(sourceAddress) != 0 { - return &common.AppError{ - Code: VerifyErrCode, - Message: "source address should be empty for uptime message", - } - } - +func (b *backend) verifyUptimeMessage(uptimeMsg *messages.ValidatorUptime) *common.AppError { b.stateLock.Lock() defer b.stateLock.Unlock() // first get the validator's nodeID From 5974170c3d447901af92083acc8e2f80dff559c0 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 8 Nov 2024 17:39:52 +0300 Subject: [PATCH 84/98] new fields and refactorings --- go.mod | 29 ++++- go.sum | 67 +++++++++- plugin/evm/tx_gossip_test.go | 9 -- plugin/evm/uptime/pausable_manager.go | 14 +-- plugin/evm/uptime/pausable_manager_test.go | 87 ++++++++++++- plugin/evm/validators/interfaces/interface.go | 31 ++--- plugin/evm/validators/state.go | 119 ++++++++++++------ plugin/evm/validators/state_test.go | 97 +++++++++++--- plugin/evm/vm.go | 34 ++--- plugin/evm/vm_test.go | 55 +------- plugin/evm/vm_validators_state_test.go | 50 ++++++-- plugin/evm/vm_warp_test.go | 4 +- scripts/versions.sh | 2 +- utils/snow.go | 46 ++++++- 14 files changed, 461 insertions(+), 183 deletions(-) diff --git a/go.mod b/go.mod index eb7519162f..b329ca1863 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22.8 require ( github.com/VictoriaMetrics/fastcache v1.12.1 github.com/antithesishq/antithesis-sdk-go v0.3.8 - github.com/ava-labs/avalanchego v1.11.13-0.20241029131257-e05aa37abaf9 + github.com/ava-labs/avalanchego v1.11.13-0.20241104091041-0b7a136fba6b github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 @@ -76,17 +76,23 @@ require ( github.com/dlclark/regexp2 v1.7.0 // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/ethereum/c-kzg-4844 v0.4.0 // indirect github.com/getsentry/sentry-go v0.18.0 // indirect github.com/go-logr/logr v1.4.1 // indirect 
github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/btree v1.1.2 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect github.com/google/renameio/v2 v2.0.0 // indirect github.com/gorilla/mux v1.8.0 // indirect @@ -96,17 +102,25 @@ require ( github.com/huin/goupnp v1.3.0 // indirect github.com/jackpal/gateway v1.0.6 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.15.15 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/magiconair/properties v1.8.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-runewidth v0.0.13 // indirect github.com/mattn/go-shellwords v1.0.12 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect + github.com/moby/spdystream v0.2.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mr-tron/base58 v1.2.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect @@ -124,7 +138,7 @@ require ( github.com/spf13/afero v1.8.2 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/subosito/gotenv v1.3.0 // indirect - github.com/supranational/blst v0.3.11 // indirect + github.com/supranational/blst v0.3.13 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect @@ -141,14 +155,25 @@ 
require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect golang.org/x/net v0.28.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/term v0.23.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect gonum.org/v1/gonum v0.11.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed // indirect google.golang.org/grpc v1.66.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.29.0 // indirect + k8s.io/apimachinery v0.29.0 // indirect + k8s.io/client-go v0.29.0 // indirect + k8s.io/klog/v2 v2.110.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect rsc.io/tmplfunc v0.0.3 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index c5d14ece7a..ca120b2521 100644 --- a/go.sum +++ b/go.sum @@ -58,10 +58,10 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax github.com/antithesishq/antithesis-sdk-go v0.3.8 h1:OvGoHxIcOXFJLyn9IJQ5DzByZ3YVAWNBc394ObzDRb8= github.com/antithesishq/antithesis-sdk-go v0.3.8/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ava-labs/avalanchego v1.11.13-0.20241026214739-acb3d7d102a0 h1:1T9OnvZP6XZ62EVWlfmrI8rrudyE6bM2Zt51pCHfS5o= -github.com/ava-labs/avalanchego v1.11.13-0.20241026214739-acb3d7d102a0/go.mod h1:gYlTU42Q4b29hzhUN22yclym5qwB3Si0jh4+LTn7DZM= -github.com/ava-labs/avalanchego v1.11.13-0.20241029131257-e05aa37abaf9 h1:rX5Xn5WBPppyvvZXp8wwx9dX+4fBY2/XeNIEm18ifzk= -github.com/ava-labs/avalanchego v1.11.13-0.20241029131257-e05aa37abaf9/go.mod h1:gYlTU42Q4b29hzhUN22yclym5qwB3Si0jh4+LTn7DZM= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/ava-labs/avalanchego v1.11.13-0.20241104091041-0b7a136fba6b h1:qvjqst/9X/c4tvI6nFzOwoTeRVIOMPAtEf1liDYrlhI= +github.com/ava-labs/avalanchego v1.11.13-0.20241104091041-0b7a136fba6b/go.mod h1:86tO6F1FT8emclUwdQ2WCwAtAerqjm5A4IbV6XxNUyM= github.com/ava-labs/coreth v0.13.8 h1:f14X3KgwHl9LwzfxlN6S4bbn5VA2rhEsNnHaRLSTo/8= github.com/ava-labs/coreth v0.13.8/go.mod h1:t3BSv/eQv0AlDPMfEDCMMoD/jq1RkUsbFzQAFg5qBcE= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= @@ -173,6 +173,8 @@ github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= github.com/dustin/go-humanize v1.0.0/go.mod 
h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -214,6 +216,7 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -222,6 +225,12 @@ github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AE github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= @@ -280,6 +289,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod 
h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -290,10 +301,13 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -326,6 +340,7 @@ github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB7 github.com/gorilla/rpc v1.2.0 h1:WvvdC2lNeT1SP32zrIce5l0ECBfbAlmrmSBsuc57wfk= github.com/gorilla/rpc v1.2.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= @@ -367,9 +382,13 @@ github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7Bd github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern 
v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -406,6 +425,8 @@ github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= @@ -439,13 +460,22 @@ github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8oh github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/moul/http2curl v1.0.0/go.mod 
h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= @@ -548,6 +578,7 @@ github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobt github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -557,12 +588,13 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= -github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= -github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk= +github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/syndtr/goleveldb 
v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5fl+nylMaIr9PVV1w343YRDtsy+Rwu7XI= github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= @@ -742,6 +774,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1022,6 +1056,8 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -1033,6 +1069,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -1050,8 +1087,26 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= +k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= +k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= +k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= +k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= +k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kube-openapi 
v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go index a87a7a3896..0eaaa63e6d 100644 --- a/plugin/evm/tx_gossip_test.go +++ b/plugin/evm/tx_gossip_test.go @@ -20,7 +20,6 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/enginetest" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/snow/validators/validatorstest" agoUtils "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/set" @@ -155,14 +154,6 @@ func TestEthTxPushGossipOutbound(t *testing.T) { require := require.New(t) ctx := context.Background() snowCtx := utils.TestSnowContext() - snowCtx.ValidatorState = &validatorstest.State{ - GetCurrentHeightF: func(context.Context) (uint64, error) { - return 0, nil - }, - GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { - return nil, nil - }, - } sender := &enginetest.SenderStub{ SentAppGossip: make(chan []byte, 1), } diff --git a/plugin/evm/uptime/pausable_manager.go b/plugin/evm/uptime/pausable_manager.go index 3976fcae6c..8b81c4264b 100644 --- a/plugin/evm/uptime/pausable_manager.go +++ b/plugin/evm/uptime/pausable_manager.go @@ -58,16 +58,10 @@ func (p *pausableManager) Disconnect(nodeID ids.NodeID) error { return nil } -// StartTracking starts tracking uptime for the nodes with the given IDs -// If a node is paused, it will not be tracked -func (p *pausableManager) StartTracking(nodeIDs []ids.NodeID) error { - activeNodeIDs := make([]ids.NodeID, 0, len(nodeIDs)) - for _, nodeID := range nodeIDs { - if !p.IsPaused(nodeID) { - activeNodeIDs = append(activeNodeIDs, nodeID) - } - } - return p.Manager.StartTracking(activeNodeIDs) +// IsConnected returns true if the node with the given ID is connected to the uptime.Manager +// Note: Inner manager may have a different view of the connection status due to pausing +func (p *pausableManager) IsConnected(nodeID ids.NodeID) bool { + return p.connectedVdrs.Contains(nodeID) } 
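The IsConnected accessor above exposes the pausable manager's own connection set, which can diverge from the wrapped uptime.Manager once a validator is paused. A small sketch of that distinction, mirroring the fixtures (up, vID, nodeID0) used in the tests below; it is illustrative only:

    // Pausing a connected validator disconnects it in the inner manager,
    // but the pausable manager still reports the peer as connected.
    require.NoError(up.Connect(nodeID0))
    up.OnValidatorStatusUpdated(vID, nodeID0, false) // pause
    require.True(up.IsConnected(nodeID0))            // outer view: still connected
    require.True(up.IsPaused(nodeID0))               // uptime stops accruing while tracked and paused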
// OnValidatorAdded is called when a validator is added. diff --git a/plugin/evm/uptime/pausable_manager_test.go b/plugin/evm/uptime/pausable_manager_test.go index e1f4f4a6f8..73e84fb360 100644 --- a/plugin/evm/uptime/pausable_manager_test.go +++ b/plugin/evm/uptime/pausable_manager_test.go @@ -38,12 +38,14 @@ func TestPausableManager(t *testing.T) { // Elapse Time addTime(clk, time.Second) + // The node was paused before we started tracking + expectedUptime := 1 * time.Second // Start tracking require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) currentTime := addTime(clk, time.Second) - // Uptime should not have increased since the node was paused - expectedUptime := 0 * time.Second + // Uptime should have increased since the node was paused before we started tracking + expectedUptime += 1 * time.Second checkUptime(t, up, nodeID0, expectedUptime, currentTime) // Disconnect @@ -118,11 +120,13 @@ func TestPausableManager(t *testing.T) { // Connect and check uptime addTime(clk, 1*time.Second) + // Uptime should be 1 since the node was paused before we started tracking + expectedUptime := 1 * time.Second + checkUptime(t, up, nodeID0, expectedUptime, clk.Time()) require.NoError(up.Connect(nodeID0)) currentTime := addTime(clk, 2*time.Second) - // Uptime should not have increased since the node was paused - expectedUptime := 0 * time.Second + // Uptime should not have increased since the node was paused after we started tracking checkUptime(t, up, nodeID0, expectedUptime, currentTime) // Disconnect and check uptime @@ -209,6 +213,81 @@ func TestPausableManager(t *testing.T) { checkUptime(t, up, nodeID0, expectedUptime, currentTime) }, }, + { + name: "Case 5: Node paused after we stop tracking", + testFunc: func(t *testing.T, up interfaces.PausableManager, clk *mockable.Clock, s uptime.State) { + require := require.New(t) + + // Start tracking and connect + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) + addTime(clk, time.Second) + require.NoError(up.Connect(nodeID0)) + + // Stop tracking + currentTime := addTime(clk, 2*time.Second) + expectedUptime := 2 * time.Second + checkUptime(t, up, nodeID0, expectedUptime, currentTime) + require.NoError(up.StopTracking([]ids.NodeID{nodeID0})) + + // Pause after a while + currentTime = addTime(clk, 3*time.Second) + // expectedUptime should increase since we stopped tracking + expectedUptime += 3 * time.Second + up.OnValidatorStatusUpdated(vID, nodeID0, false) + require.True(up.IsPaused(nodeID0)) + // expectedUptime should increase since we stopped tracking (even if the node was paused) + currentTime = addTime(clk, 4*time.Second) + expectedUptime += 4 * time.Second + + // Start tracking and check elapsed time + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) + // Uptime have increased since the node was paused before we started tracking + // We should be optimistic and assume the node was online and active until we start tracking + require.True(up.IsPaused(nodeID0)) + checkUptime(t, up, nodeID0, expectedUptime, currentTime) + }, + }, + { + name: "Case 6: Paused node got resumed after we stop tracking", + testFunc: func(t *testing.T, up interfaces.PausableManager, clk *mockable.Clock, s uptime.State) { + require := require.New(t) + + // Start tracking and connect + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) + addTime(clk, time.Second) + require.NoError(up.Connect(nodeID0)) + + // Pause after a while + currentTime := addTime(clk, 2*time.Second) + // expectedUptime should increase + expectedUptime := 2 * time.Second + 
up.OnValidatorStatusUpdated(vID, nodeID0, false) + require.True(up.IsPaused(nodeID0)) + checkUptime(t, up, nodeID0, expectedUptime, currentTime) + + // Stop tracking + currentTime = addTime(clk, 3*time.Second) + // expectedUptime should not increase since the node was paused + checkUptime(t, up, nodeID0, expectedUptime, currentTime) + require.NoError(up.StopTracking([]ids.NodeID{nodeID0})) + + // Resume after a while + currentTime = addTime(clk, 4*time.Second) + // expectedUptime should increase since we stopped tracking + expectedUptime += 4 * time.Second + up.OnValidatorStatusUpdated(vID, nodeID0, true) + require.False(up.IsPaused(nodeID0)) + // expectedUptime should increase since we stopped tracking + currentTime = addTime(clk, 5*time.Second) + expectedUptime += 5 * time.Second + + // Start tracking and check elapsed time + require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) + // Uptime should have increased by 4 seconds since the node was resumed + // We should be optimistic and assume the node was online and active until we start tracking + checkUptime(t, up, nodeID0, expectedUptime, currentTime) + }, + }, } for _, test := range tests { diff --git a/plugin/evm/validators/interfaces/interface.go b/plugin/evm/validators/interfaces/interface.go index ab59b948c1..39b6b8c9e9 100644 --- a/plugin/evm/validators/interfaces/interface.go +++ b/plugin/evm/validators/interfaces/interface.go @@ -14,23 +14,20 @@ import ( type State interface { uptime.State // AddValidator adds a new validator to the state - AddValidator(vID ids.ID, nodeID ids.NodeID, startTimestamp uint64, isActive bool) error + AddValidator(vdr Validator) error + // UpdateValidator updates the validator in the state + UpdateValidator(vdr Validator) error + // GetValidator returns the validator data for the given validation ID + GetValidator(vID ids.ID) (Validator, error) // DeleteValidator deletes the validator from the state DeleteValidator(vID ids.ID) error // WriteState writes the validator state to the disk WriteState() error - // SetStatus sets the active status of the validator with the given vID - SetStatus(vID ids.ID, isActive bool) error - // GetStatus returns the active status of the validator with the given vID - GetStatus(vID ids.ID) (bool, error) - // GetValidationIDs returns the validation IDs in the state GetValidationIDs() set.Set[ids.ID] - // GetValidatorIDs returns the validator node IDs in the state - GetValidatorIDs() set.Set[ids.NodeID] - // GetValidator returns the validator data for the given nodeID - GetValidator(nodeID ids.NodeID) (*ValidatorOutput, error) + // GetNodeIDs returns the validator node IDs in the state + GetNodeIDs() set.Set[ids.NodeID] // RegisterListener registers a listener to the state RegisterListener(StateCallbackListener) @@ -46,9 +43,13 @@ type StateCallbackListener interface { OnValidatorStatusUpdated(vID ids.ID, nodeID ids.NodeID, isActive bool) } -type ValidatorOutput struct { - ValidationID ids.ID `json:"validationID"` - NodeID ids.NodeID `json:"nodeID"` - StartTime time.Time `json:"startTime"` - IsActive bool `json:"isActive"` +type Validator struct { + ValidationID ids.ID `json:"validationID"` + NodeID ids.NodeID `json:"nodeID"` + Weight uint64 `json:"weight"` + StartTimestamp uint64 `json:"startTimestamp"` + IsActive bool `json:"isActive"` + IsSoV bool `json:"isSoV"` } + +func (v *Validator) StartTime() time.Time { return time.Unix(int64(v.StartTimestamp), 0) } diff --git a/plugin/evm/validators/state.go b/plugin/evm/validators/state.go index 52edcd0710..8f8279785c 100644 
--- a/plugin/evm/validators/state.go +++ b/plugin/evm/validators/state.go @@ -18,19 +18,24 @@ var _ uptime.State = &state{} type dbUpdateStatus bool -var ErrAlreadyExists = fmt.Errorf("validator already exists") +var ( + ErrAlreadyExists = fmt.Errorf("validator already exists") + ErrImmutableField = fmt.Errorf("immutable field cannot be updated") +) const ( - updated dbUpdateStatus = true - deleted dbUpdateStatus = false + updatedStatus dbUpdateStatus = true + deletedStatus dbUpdateStatus = false ) type validatorData struct { UpDuration time.Duration `serialize:"true"` LastUpdated uint64 `serialize:"true"` NodeID ids.NodeID `serialize:"true"` + Weight uint64 `serialize:"true"` StartTime uint64 `serialize:"true"` IsActive bool `serialize:"true"` + IsSoV bool `serialize:"true"` validationID ids.ID // database key } @@ -83,7 +88,7 @@ func (s *state) SetUptime( data.UpDuration = upDuration data.setLastUpdated(lastUpdated) - s.updatedData[data.validationID] = updated + s.updatedData[data.validationID] = updatedStatus return nil } @@ -98,23 +103,57 @@ func (s *state) GetStartTime(nodeID ids.NodeID) (time.Time, error) { // AddValidator adds a new validator to the state // the new validator is marked as updated and will be written to the disk when WriteState is called -func (s *state) AddValidator(vID ids.ID, nodeID ids.NodeID, startTimestamp uint64, isActive bool) error { +func (s *state) AddValidator(vdr interfaces.Validator) error { data := &validatorData{ - NodeID: nodeID, - validationID: vID, - IsActive: isActive, - StartTime: startTimestamp, + NodeID: vdr.NodeID, + validationID: vdr.ValidationID, + IsActive: vdr.IsActive, + StartTime: vdr.StartTimestamp, UpDuration: 0, - LastUpdated: startTimestamp, + LastUpdated: vdr.StartTimestamp, + IsSoV: vdr.IsSoV, + Weight: vdr.Weight, } - if err := s.addData(vID, data); err != nil { + if err := s.addData(vdr.ValidationID, data); err != nil { return err } - s.updatedData[vID] = updated + s.updatedData[vdr.ValidationID] = updatedStatus for _, listener := range s.listeners { - listener.OnValidatorAdded(vID, nodeID, startTimestamp, isActive) + listener.OnValidatorAdded(vdr.ValidationID, vdr.NodeID, vdr.StartTimestamp, vdr.IsActive) + } + return nil +} + +// UpdateValidator updates the validator in the state +// returns an error if the validator does not exist or if the immutable fields are modified +func (s *state) UpdateValidator(vdr interfaces.Validator) error { + data, exists := s.data[vdr.ValidationID] + if !exists { + return database.ErrNotFound + } + // check immutable fields + if !data.constantsAreUnmodified(vdr) { + return ErrImmutableField + } + // check if mutable fields have changed + updated := false + if data.IsActive != vdr.IsActive { + data.IsActive = vdr.IsActive + updated = true + for _, listener := range s.listeners { + listener.OnValidatorStatusUpdated(data.validationID, data.NodeID, data.IsActive) + } + } + + if data.Weight != vdr.Weight { + data.Weight = vdr.Weight + updated = true + } + + if updated { + s.updatedData[vdr.ValidationID] = updatedStatus } return nil } @@ -130,7 +169,7 @@ func (s *state) DeleteValidator(vID ids.ID) error { delete(s.index, data.NodeID) // mark as deleted for WriteValidator - s.updatedData[data.validationID] = deleted + s.updatedData[data.validationID] = deletedStatus for _, listener := range s.listeners { listener.OnValidatorRemoved(vID, data.NodeID) @@ -144,7 +183,7 @@ func (s *state) WriteState() error { batch := s.db.NewBatch() for vID, updateStatus := range s.updatedData { switch updateStatus { - case 
updated: + case updatedStatus: data := s.data[vID] dataBytes, err := vdrCodec.Marshal(codecVersion, data) @@ -154,7 +193,7 @@ func (s *state) WriteState() error { if err := batch.Put(vID[:], dataBytes); err != nil { return err } - case deleted: + case deletedStatus: if err := batch.Delete(vID[:]); err != nil { return err } @@ -174,7 +213,7 @@ func (s *state) SetStatus(vID ids.ID, isActive bool) error { return database.ErrNotFound } data.IsActive = isActive - s.updatedData[vID] = updated + s.updatedData[vID] = updatedStatus for _, listener := range s.listeners { listener.OnValidatorStatusUpdated(vID, data.NodeID, isActive) @@ -182,15 +221,6 @@ func (s *state) SetStatus(vID ids.ID, isActive bool) error { return nil } -// GetStatus returns the active status of the validator with the given vID -func (s *state) GetStatus(vID ids.ID) (bool, error) { - data, exists := s.data[vID] - if !exists { - return false, database.ErrNotFound - } - return data.IsActive, nil -} - // GetValidationIDs returns the validation IDs in the state func (s *state) GetValidationIDs() set.Set[ids.ID] { ids := set.NewSet[ids.ID](len(s.data)) @@ -200,8 +230,8 @@ func (s *state) GetValidationIDs() set.Set[ids.ID] { return ids } -// GetValidatorIDs returns the validator IDs in the state -func (s *state) GetValidatorIDs() set.Set[ids.NodeID] { +// GetNodeIDs returns the node IDs of validators in the state +func (s *state) GetNodeIDs() set.Set[ids.NodeID] { ids := set.NewSet[ids.NodeID](len(s.index)) for nodeID := range s.index { ids.Add(nodeID) @@ -209,17 +239,19 @@ func (s *state) GetValidatorIDs() set.Set[ids.NodeID] { return ids } -// GetValidator returns the validator data for the given nodeID -func (s *state) GetValidator(nodeID ids.NodeID) (*interfaces.ValidatorOutput, error) { - data, err := s.getData(nodeID) - if err != nil { - return nil, err +// GetValidator returns the validator data for the given validationID +func (s *state) GetValidator(vID ids.ID) (interfaces.Validator, error) { + data, ok := s.data[vID] + if !ok { + return interfaces.Validator{}, database.ErrNotFound } - return &interfaces.ValidatorOutput{ - ValidationID: data.validationID, - NodeID: data.NodeID, - StartTime: data.getStartTime(), - IsActive: data.IsActive, + return interfaces.Validator{ + ValidationID: data.validationID, + NodeID: data.NodeID, + StartTimestamp: data.StartTime, + IsActive: data.IsActive, + Weight: data.Weight, + IsSoV: data.IsSoV, }, nil } @@ -307,3 +339,14 @@ func (v *validatorData) getLastUpdated() time.Time { func (v *validatorData) getStartTime() time.Time { return time.Unix(int64(v.StartTime), 0) } + +// constantsAreUnmodified returns true if the constants of this validator have +// not been modified compared to the other validator. 
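+// When the validation IDs differ, the entries refer to different registrations, so the check below returns true without comparing the remaining fields.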
+func (v *validatorData) constantsAreUnmodified(o interfaces.Validator) bool { + if v.validationID != o.ValidationID { + return true + } + return v.NodeID == o.NodeID && + v.IsSoV == o.IsSoV && + v.StartTime == o.StartTimestamp +} diff --git a/plugin/evm/validators/state_test.go b/plugin/evm/validators/state_test.go index e5e8244027..2184a6fbbc 100644 --- a/plugin/evm/validators/state_test.go +++ b/plugin/evm/validators/state_test.go @@ -36,13 +36,23 @@ func TestState(t *testing.T) { require.ErrorIs(err, database.ErrNotFound) // add new validator - state.AddValidator(vID, nodeID, uint64(startTime.Unix()), true) + vdr := interfaces.Validator{ + ValidationID: vID, + NodeID: nodeID, + Weight: 1, + StartTimestamp: uint64(startTime.Unix()), + IsActive: true, + IsSoV: true, + } + state.AddValidator(vdr) // adding the same validator should fail - err = state.AddValidator(vID, ids.GenerateTestNodeID(), uint64(startTime.Unix()), true) + err = state.AddValidator(vdr) require.ErrorIs(err, ErrAlreadyExists) // adding the same nodeID should fail - err = state.AddValidator(ids.GenerateTestID(), nodeID, uint64(startTime.Unix()), true) + newVdr := vdr + newVdr.ValidationID = ids.GenerateTestID() + err = state.AddValidator(newVdr) require.ErrorIs(err, ErrAlreadyExists) // get uptime @@ -62,11 +72,26 @@ func TestState(t *testing.T) { require.Equal(newLastUpdated, lastUpdated) // set status - require.NoError(state.SetStatus(vID, false)) + vdr.IsActive = false + require.NoError(state.UpdateValidator(vdr)) // get status - status, err := state.GetStatus(vID) + data, err := state.GetValidator(vID) + require.NoError(err) + require.False(data.IsActive) + + // set weight + newWeight := uint64(2) + vdr.Weight = newWeight + require.NoError(state.UpdateValidator(vdr)) + // get weight + data, err = state.GetValidator(vID) require.NoError(err) - require.False(status) + require.Equal(newWeight, data.Weight) + + // set a different node ID should fail + newNodeID := ids.GenerateTestNodeID() + vdr.NodeID = newNodeID + require.ErrorIs(state.UpdateValidator(vdr), ErrImmutableField) // delete uptime require.NoError(state.DeleteValidator(vID)) @@ -88,7 +113,14 @@ func TestWriteValidator(t *testing.T) { nodeID := ids.GenerateTestNodeID() vID := ids.GenerateTestID() startTime := time.Now() - require.NoError(state.AddValidator(vID, nodeID, uint64(startTime.Unix()), true)) + require.NoError(state.AddValidator(interfaces.Validator{ + ValidationID: vID, + NodeID: nodeID, + Weight: 1, + StartTimestamp: uint64(startTime.Unix()), + IsActive: true, + IsSoV: true, + })) // write state, should reflect to DB require.NoError(state.WriteState()) @@ -132,8 +164,14 @@ func TestParseValidator(t *testing.T) { name: "nil", bytes: nil, expected: &validatorData{ - LastUpdated: 0, - StartTime: 0, + LastUpdated: 0, + StartTime: 0, + validationID: ids.Empty, + NodeID: ids.EmptyNodeID, + UpDuration: 0, + Weight: 0, + IsActive: false, + IsSoV: false, }, expectedErr: nil, }, @@ -141,8 +179,14 @@ func TestParseValidator(t *testing.T) { name: "empty", bytes: []byte{}, expected: &validatorData{ - LastUpdated: 0, - StartTime: 0, + LastUpdated: 0, + StartTime: 0, + validationID: ids.Empty, + NodeID: ids.EmptyNodeID, + UpDuration: 0, + Weight: 0, + IsActive: false, + IsSoV: false, }, expectedErr: nil, }, @@ -159,10 +203,14 @@ func TestParseValidator(t *testing.T) { 0x7e, 0xef, 0xe8, 0x8a, 0x45, 0xfb, 0x7a, 0xc4, 0xb0, 0x59, 0xc9, 0x33, 0x71, 0x0a, 0x57, 0x33, 0xff, 0x9f, 0x4b, 0xab, + // weight + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // start time 
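// 0x5B8D80 == 6,000,000, matching the expected StartTime below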
0x00, 0x00, 0x00, 0x00, 0x00, 0x5B, 0x8D, 0x80, // status 0x01, + // is SoV + 0x01, }, expected: &validatorData{ UpDuration: time.Duration(6000000), @@ -170,6 +218,8 @@ func TestParseValidator(t *testing.T) { NodeID: testNodeID, StartTime: 6000000, IsActive: true, + Weight: 1, + IsSoV: true, }, }, { @@ -231,7 +281,14 @@ func TestStateListener(t *testing.T) { initialStartTime := time.Now() // add initial validator - require.NoError(state.AddValidator(initialvID, initialNodeID, uint64(initialStartTime.Unix()), true)) + require.NoError(state.AddValidator(interfaces.Validator{ + ValidationID: initialvID, + NodeID: initialNodeID, + Weight: 1, + StartTimestamp: uint64(initialStartTime.Unix()), + IsActive: true, + IsSoV: true, + })) // register listener mockListener.EXPECT().OnValidatorAdded(initialvID, initialNodeID, uint64(initialStartTime.Unix()), true) @@ -239,11 +296,23 @@ func TestStateListener(t *testing.T) { // add new validator mockListener.EXPECT().OnValidatorAdded(expectedvID, expectedNodeID, uint64(expectedStartTime.Unix()), true) - require.NoError(state.AddValidator(expectedvID, expectedNodeID, uint64(expectedStartTime.Unix()), true)) + vdr := interfaces.Validator{ + ValidationID: expectedvID, + NodeID: expectedNodeID, + Weight: 1, + StartTimestamp: uint64(expectedStartTime.Unix()), + IsActive: true, + IsSoV: true, + } + require.NoError(state.AddValidator(vdr)) // set status mockListener.EXPECT().OnValidatorStatusUpdated(expectedvID, expectedNodeID, false) - require.NoError(state.SetStatus(expectedvID, false)) + vdr.IsActive = false + require.NoError(state.UpdateValidator(vdr)) + + // set status twice should not trigger listener + require.NoError(state.UpdateValidator(vdr)) // remove validator mockListener.EXPECT().OnValidatorRemoved(expectedvID, expectedNodeID) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 6d5937f546..7c2eec1d26 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -511,6 +511,7 @@ func (vm *VM) Initialize( return fmt.Errorf("failed to initialize validator state: %w", err) } + // Initialize uptime manager vm.uptimeManager = uptime.NewPausableManager(avalancheUptime.NewManager(vm.validatorState, &vm.clock)) vm.validatorState.RegisterListener(vm.uptimeManager) @@ -739,7 +740,7 @@ func (vm *VM) onNormalOperationsStarted() error { if err := vm.performValidatorUpdate(ctx); err != nil { return fmt.Errorf("failed to update validators: %w", err) } - vdrIDs := vm.validatorState.GetValidatorIDs().List() + vdrIDs := vm.validatorState.GetNodeIDs().List() // then start tracking with updated validators if err := vm.uptimeManager.StartTracking(vdrIDs); err != nil { return fmt.Errorf("failed to start tracking uptime: %w", err) @@ -880,7 +881,7 @@ func (vm *VM) Shutdown(context.Context) error { vm.cancel() } if vm.bootstrapped.Get() { - vdrIDs := vm.validatorState.GetValidatorIDs().List() + vdrIDs := vm.validatorState.GetNodeIDs().List() if err := vm.uptimeManager.StopTracking(vdrIDs); err != nil { return fmt.Errorf("failed to stop tracking uptime: %w", err) } @@ -1444,7 +1445,7 @@ func (vm *VM) performValidatorUpdate(ctx context.Context) error { now := time.Now() log.Debug("performing validator update") // get current validator set - currentValidatorSet, _, _, err := vm.ctx.ValidatorState.GetCurrentValidatorSet(ctx, vm.ctx.SubnetID) + currentValidatorSet, _, err := vm.ctx.ValidatorState.GetCurrentValidatorSet(ctx, vm.ctx.SubnetID) if err != nil { return fmt.Errorf("failed to get current validator set: %w", err) } @@ -1465,32 +1466,33 @@ func (vm *VM) 
performValidatorUpdate(ctx context.Context) error { } // loadValidators loads the [validators] into the validator state [validatorState] -func loadValidators(validatorState validatorsinterfaces.State, validators map[ids.ID]*avalancheValidators.GetCurrentValidatorOutput) error { +func loadValidators(validatorState validatorsinterfaces.State, newValidators map[ids.ID]*avalancheValidators.GetCurrentValidatorOutput) error { currentValidationIDs := validatorState.GetValidationIDs() // first check if we need to delete any existing validators for vID := range currentValidationIDs { // if the validator is not in the new set of validators // delete the validator - if _, exists := validators[vID]; !exists { + if _, exists := newValidators[vID]; !exists { validatorState.DeleteValidator(vID) } } // then load the new validators - for vID, vdr := range validators { - if currentValidationIDs.Contains(vID) { - // Check if IsActive has changed - isActive, err := validatorState.GetStatus(vID) - if err != nil { + for newVID, newVdr := range newValidators { + currentVdr := validatorsinterfaces.Validator{ + ValidationID: newVID, + NodeID: newVdr.NodeID, + Weight: newVdr.Weight, + StartTimestamp: newVdr.StartTime, + IsActive: newVdr.IsActive, + IsSoV: newVdr.IsSoV, + } + if currentValidationIDs.Contains(newVID) { + if err := validatorState.UpdateValidator(currentVdr); err != nil { return err } - if isActive != vdr.IsActive { - if err := validatorState.SetStatus(vID, vdr.IsActive); err != nil { - return err - } - } } else { - if err := validatorState.AddValidator(vID, vdr.NodeID, vdr.StartTime, vdr.IsActive); err != nil { + if err := validatorState.AddValidator(currentVdr); err != nil { return err } } diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index c3e251e9a4..42e525d656 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -33,11 +33,7 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/enginetest" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/snow/validators/validatorstest" "github.com/ava-labs/avalanchego/upgrade" - avalancheConstants "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/components/chain" @@ -63,17 +59,14 @@ import ( "github.com/ava-labs/subnet-evm/vmerrs" avagoconstants "github.com/ava-labs/avalanchego/utils/constants" - avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" ) var ( - testNetworkID uint32 = avagoconstants.UnitTestID - testCChainID = ids.ID{'c', 'c', 'h', 'a', 'i', 'n', 't', 'e', 's', 't'} - testXChainID = ids.ID{'t', 'e', 's', 't', 'x'} - testMinGasPrice int64 = 225_000_000_000 + testNetworkID uint32 = avagoconstants.UnitTestID + + testMinGasPrice int64 = 225_000_000_000 testKeys []*ecdsa.PrivateKey testEthAddrs []common.Address // testEthAddrs[i] corresponds to testKeys[i] - testAvaxAssetID = ids.ID{1, 2, 3} username = "Johns" password = "CjasdjhiPeirbSenfeI13" // #nosec G101 @@ -139,44 +132,6 @@ func buildGenesisTest(t *testing.T, genesisJSON string) 
[]byte { return genesisBytes } -func NewContext() *snow.Context { - ctx := utils.TestSnowContext() - ctx.NodeID = ids.GenerateTestNodeID() - ctx.NetworkID = testNetworkID - ctx.ChainID = testCChainID - ctx.AVAXAssetID = testAvaxAssetID - ctx.XChainID = testXChainID - aliaser := ctx.BCLookup.(ids.Aliaser) - _ = aliaser.Alias(testCChainID, "C") - _ = aliaser.Alias(testCChainID, testCChainID.String()) - _ = aliaser.Alias(testXChainID, "X") - _ = aliaser.Alias(testXChainID, testXChainID.String()) - ctx.ValidatorState = &validatorstest.State{ - GetCurrentHeightF: func(ctx context.Context) (uint64, error) { return 0, nil }, - GetValidatorSetF: func(ctx context.Context, height uint64, subnetID ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { - return map[ids.NodeID]*validators.GetValidatorOutput{}, nil - }, - GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { - subnetID, ok := map[ids.ID]ids.ID{ - avalancheConstants.PlatformChainID: avalancheConstants.PrimaryNetworkID, - testXChainID: avalancheConstants.PrimaryNetworkID, - testCChainID: avalancheConstants.PrimaryNetworkID, - }[chainID] - if !ok { - return ids.Empty, errors.New("unknown chain") - } - return subnetID, nil - }, - } - blsSecretKey, err := bls.NewSecretKey() - if err != nil { - panic(err) - } - ctx.WarpSigner = avalancheWarp.NewSigner(blsSecretKey, ctx.NetworkID, ctx.ChainID) - ctx.PublicKey = bls.PublicFromSecretKey(blsSecretKey) - return ctx -} - // setupGenesis sets up the genesis // If [genesisJSON] is empty, defaults to using [genesisJSONLatest] func setupGenesis( @@ -192,7 +147,7 @@ func setupGenesis( genesisJSON = genesisJSONLatest } genesisBytes := buildGenesisTest(t, genesisJSON) - ctx := NewContext() + ctx := utils.TestSnowContext() baseDB := memdb.New() @@ -498,7 +453,7 @@ func TestBuildEthTxBlock(t *testing.T) { if err := restartedVM.Initialize( context.Background(), - NewContext(), + utils.TestSnowContext(), dbManager, genesisBytes, []byte(""), diff --git a/plugin/evm/vm_validators_state_test.go b/plugin/evm/vm_validators_state_test.go index cec73feeca..8bf8c3546f 100644 --- a/plugin/evm/vm_validators_state_test.go +++ b/plugin/evm/vm_validators_state_test.go @@ -16,6 +16,7 @@ import ( "github.com/ava-labs/subnet-evm/core" "github.com/ava-labs/subnet-evm/plugin/evm/validators" "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" + "github.com/ava-labs/subnet-evm/utils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" @@ -43,7 +44,7 @@ func TestValidatorState(t *testing.T) { ids.GenerateTestID(), } ctx.ValidatorState = &validatorstest.State{ - GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagoValidators.GetCurrentValidatorOutput, uint64, bool, error) { + GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagoValidators.GetCurrentValidatorOutput, uint64, error) { return map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ testValidationIDs[0]: { NodeID: testNodeIDs[0], @@ -60,7 +61,7 @@ func TestValidatorState(t *testing.T) { PublicKey: nil, Weight: 1, }, - }, 0, false, nil + }, 0, nil }, } appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } @@ -99,7 +100,7 @@ func TestValidatorState(t *testing.T) { vm = &VM{} err = vm.Initialize( context.Background(), - NewContext(), // this context does not have validators 
state, making VM to source it from the database + utils.TestSnowContext(), // this context does not have validators state, making VM to source it from the database dbManager, genesisBytes, []byte(""), @@ -118,7 +119,7 @@ func TestValidatorState(t *testing.T) { newValidationID := ids.GenerateTestID() newNodeID := ids.GenerateTestNodeID() testState := &validatorstest.State{ - GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagoValidators.GetCurrentValidatorOutput, uint64, bool, error) { + GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagoValidators.GetCurrentValidatorOutput, uint64, error) { return map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ testValidationIDs[0]: { NodeID: testNodeIDs[0], @@ -140,7 +141,7 @@ func TestValidatorState(t *testing.T) { PublicKey: nil, Weight: 1, }, - }, 0, false, nil + }, 0, nil }, } vm.ctx.ValidatorState = testState @@ -150,8 +151,8 @@ func TestValidatorState(t *testing.T) { // new validator should be added to the state eventually after validatorsLoadFrequency require.EventuallyWithT(func(c *assert.CollectT) { - assert.Len(c, vm.validatorState.GetValidatorIDs(), 4) - newValidator, err := vm.validatorState.GetValidator(newNodeID) + assert.Len(c, vm.validatorState.GetNodeIDs(), 4) + newValidator, err := vm.validatorState.GetValidator(newValidationID) assert.NoError(c, err) assert.Equal(c, newNodeID, newValidator.NodeID) }, loadValidatorsFrequency*2, 5*time.Second) @@ -173,6 +174,7 @@ func TestLoadNewValidators(t *testing.T) { initialValidators map[ids.ID]*avagoValidators.GetCurrentValidatorOutput newValidators map[ids.ID]*avagoValidators.GetCurrentValidatorOutput registerMockListenerCalls func(*interfaces.MockStateCallbackListener) + expectedLoadErr error }{ { name: "before empty/after empty", @@ -232,12 +234,13 @@ func TestLoadNewValidators(t *testing.T) { }, }, { - name: "status change and new one", + name: "status and weight change and new one", initialValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ testValidationIDs[0]: { NodeID: testNodeIDs[0], IsActive: true, StartTime: 0, + Weight: 1, }, }, newValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ @@ -245,6 +248,7 @@ func TestLoadNewValidators(t *testing.T) { NodeID: testNodeIDs[0], IsActive: false, StartTime: 0, + Weight: 2, }, testValidationIDs[1]: { NodeID: testNodeIDs[1], @@ -302,6 +306,7 @@ func TestLoadNewValidators(t *testing.T) { StartTime: 0, }, }, + expectedLoadErr: validators.ErrImmutableField, registerMockListenerCalls: func(mock *interfaces.MockStateCallbackListener) { // initial validator will trigger first mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) @@ -319,7 +324,14 @@ func TestLoadNewValidators(t *testing.T) { // set initial validators for vID, validator := range test.initialValidators { - err := validatorState.AddValidator(vID, validator.NodeID, validator.StartTime, validator.IsActive) + err := validatorState.AddValidator(interfaces.Validator{ + ValidationID: vID, + NodeID: validator.NodeID, + Weight: validator.Weight, + StartTimestamp: validator.StartTime, + IsActive: validator.IsActive, + IsSoV: validator.IsSoV, + }) require.NoError(err) } // enable mock listener @@ -328,8 +340,24 @@ func TestLoadNewValidators(t *testing.T) { test.registerMockListenerCalls(mockListener) validatorState.RegisterListener(mockListener) - require.NoError(loadValidators(validatorState, test.newValidators)) - ctrl.Finish() + // load new validators + 
err = loadValidators(validatorState, test.newValidators) + if test.expectedLoadErr != nil { + require.Error(err) + return + } + require.NoError(err) + // check if the state is as expected + require.Equal(len(test.newValidators), validatorState.GetValidationIDs().Len()) + for vID, validator := range test.newValidators { + v, err := validatorState.GetValidator(vID) + require.NoError(err) + require.Equal(validator.NodeID, v.NodeID) + require.Equal(validator.Weight, v.Weight) + require.Equal(validator.StartTime, v.StartTimestamp) + require.Equal(validator.IsActive, v.IsActive) + require.Equal(validator.IsSoV, v.IsSoV) + } }) } } diff --git a/plugin/evm/vm_warp_test.go b/plugin/evm/vm_warp_test.go index b80af6fdd5..2131b79d94 100644 --- a/plugin/evm/vm_warp_test.go +++ b/plugin/evm/vm_warp_test.go @@ -481,7 +481,7 @@ func TestReceiveWarpMessage(t *testing.T) { }, { name: "C-Chain message should be signed by subnet without RequirePrimaryNetworkSigners", - sourceChainID: testCChainID, + sourceChainID: vm.ctx.CChainID, msgFrom: fromPrimary, useSigners: signersSubnet, blockTime: upgrade.InitiallyActiveTime.Add(2 * blockGap), @@ -504,7 +504,7 @@ func TestReceiveWarpMessage(t *testing.T) { }, { name: "C-Chain message should be signed by primary with RequirePrimaryNetworkSigners (impacted)", - sourceChainID: testCChainID, + sourceChainID: vm.ctx.CChainID, msgFrom: fromPrimary, useSigners: signersPrimary, blockTime: reEnableTime.Add(2 * blockGap), diff --git a/scripts/versions.sh b/scripts/versions.sh index fa04254c77..248a2002d8 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -4,7 +4,7 @@ # shellcheck disable=SC2034 # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'e05aa37a'} +AVALANCHE_VERSION=${AVALANCHE_VERSION:-'0b7a136f'} GINKGO_VERSION=${GINKGO_VERSION:-'v2.2.0'} # This won't be used, but it's here to make code syncs easier diff --git a/utils/snow.go b/utils/snow.go index 090d38102f..00901fbad5 100644 --- a/utils/snow.go +++ b/utils/snow.go @@ -5,6 +5,7 @@ package utils import ( "context" + "errors" "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/ids" @@ -15,6 +16,13 @@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" +) + +var ( + testCChainID = ids.ID{'c', 'c', 'h', 'a', 'i', 'n', 't', 'e', 's', 't'} + testXChainID = ids.ID{'t', 'e', 's', 't', 'x'} + testChainID = ids.ID{'t', 'e', 's', 't', 'c', 'h', 'a', 'i', 'n'} ) func TestSnowContext() *snow.Context { @@ -23,19 +31,33 @@ func TestSnowContext() *snow.Context { panic(err) } pk := bls.PublicFromSecretKey(sk) - return &snow.Context{ - NetworkID: constants.UnitTestID, + networkID := constants.UnitTestID + chainID := testChainID + + ctx := &snow.Context{ + NetworkID: networkID, SubnetID: ids.Empty, - ChainID: ids.Empty, - NodeID: ids.EmptyNodeID, + ChainID: chainID, + NodeID: ids.GenerateTestNodeID(), + XChainID: testXChainID, + CChainID: testCChainID, NetworkUpgrades: upgradetest.GetConfig(upgradetest.Latest), PublicKey: pk, + WarpSigner: warp.NewSigner(sk, networkID, chainID), Log: logging.NoLog{}, BCLookup: ids.NewAliaser(), Metrics: metrics.NewPrefixGatherer(), ChainDataDir: "", ValidatorState: NewTestValidatorState(), } + + aliaser := ctx.BCLookup.(ids.Aliaser) + _ = 
aliaser.Alias(testCChainID, "C") + _ = aliaser.Alias(testCChainID, testCChainID.String()) + _ = aliaser.Alias(testXChainID, "X") + _ = aliaser.Alias(testXChainID, testXChainID.String()) + + return ctx } func NewTestValidatorState() *validatorstest.State { @@ -43,8 +65,22 @@ func NewTestValidatorState() *validatorstest.State { GetCurrentHeightF: func(context.Context) (uint64, error) { return 0, nil }, + GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { + subnetID, ok := map[ids.ID]ids.ID{ + constants.PlatformChainID: constants.PrimaryNetworkID, + testXChainID: constants.PrimaryNetworkID, + testCChainID: constants.PrimaryNetworkID, + }[chainID] + if !ok { + return ids.Empty, errors.New("unknown chain") + } + return subnetID, nil + }, GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { - return make(map[ids.NodeID]*validators.GetValidatorOutput), nil + return map[ids.NodeID]*validators.GetValidatorOutput{}, nil + }, + GetCurrentValidatorSetF: func(context.Context, ids.ID) (map[ids.ID]*validators.GetCurrentValidatorOutput, uint64, error) { + return map[ids.ID]*validators.GetCurrentValidatorOutput{}, 0, nil }, } } From 7dca70e131169189e91d3dd35875e1bd88438789 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 8 Nov 2024 17:47:46 +0300 Subject: [PATCH 85/98] add new fields --- plugin/evm/service.go | 32 ++++++++++++++------------------ 1 file changed, 14 insertions(+), 18 deletions(-) diff --git a/plugin/evm/service.go b/plugin/evm/service.go index 52ffb11329..3b6959ee59 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -7,9 +7,7 @@ import ( "net/http" "time" - "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" ) type ValidatorsAPI struct { @@ -27,44 +25,42 @@ type GetCurrentValidatorsResponse struct { type CurrentValidator struct { ValidationID ids.ID `json:"validationID"` NodeID ids.NodeID `json:"nodeID"` + Weight uint64 `json:"weight"` StartTime time.Time `json:"startTime"` IsActive bool `json:"isActive"` + IsSoV bool `json:"isSoV"` IsConnected bool `json:"isConnected"` Uptime time.Duration `json:"uptime"` } -func (api *ValidatorsAPI) GetCurrentValidators(_ *http.Request, args *GetCurrentValidatorsRequest, reply *GetCurrentValidatorsResponse) error { +func (api *ValidatorsAPI) GetCurrentValidators(_ *http.Request, _ *struct{}, reply *GetCurrentValidatorsResponse) error { api.vm.ctx.Lock.RLock() defer api.vm.ctx.Lock.RUnlock() - nodeIDs := set.Of(args.NodeIDs...) 
- if nodeIDs.Len() == 0 { - nodeIDs = api.vm.validatorState.GetValidatorIDs() - } + vIDs := api.vm.validatorState.GetValidationIDs() - reply.Validators = make([]CurrentValidator, 0, nodeIDs.Len()) + reply.Validators = make([]CurrentValidator, 0, vIDs.Len()) - for _, nodeID := range nodeIDs.List() { - validator, err := api.vm.validatorState.GetValidator(nodeID) - switch { - case err == database.ErrNotFound: - continue - case err != nil: + for _, vID := range vIDs.List() { + validator, err := api.vm.validatorState.GetValidator(vID) + if err != nil { return err } - isConnected := api.vm.uptimeManager.IsConnected(nodeID) + isConnected := api.vm.uptimeManager.IsConnected(validator.NodeID) - uptime, _, err := api.vm.uptimeManager.CalculateUptime(nodeID) + uptime, _, err := api.vm.uptimeManager.CalculateUptime(validator.NodeID) if err != nil { return err } reply.Validators = append(reply.Validators, CurrentValidator{ ValidationID: validator.ValidationID, - NodeID: nodeID, - StartTime: validator.StartTime, + NodeID: validator.NodeID, + StartTime: validator.StartTime(), + Weight: validator.Weight, IsActive: validator.IsActive, + IsSoV: validator.IsSoV, IsConnected: isConnected, Uptime: time.Duration(uptime.Seconds()), }) From 85db94fde00cb344016d7634547de00e584c613c Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 8 Nov 2024 17:55:40 +0300 Subject: [PATCH 86/98] merge nits --- examples/sign-uptime-message/main.go | 2 ++ .../evm/validators/interfaces/noop_state.go | 34 +++++++++++-------- warp/verifier_backend.go | 9 ++--- 3 files changed, 26 insertions(+), 19 deletions(-) diff --git a/examples/sign-uptime-message/main.go b/examples/sign-uptime-message/main.go index 01d4172195..94929bc2da 100644 --- a/examples/sign-uptime-message/main.go +++ b/examples/sign-uptime-message/main.go @@ -28,6 +28,8 @@ import ( p2pmessage "github.com/ava-labs/avalanchego/message" ) +// An example application demonstrating how to request a signature for +// an uptime message from a node running locally. 
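+// The node is expected to be reachable at primary.LocalAPIURI, which is used as the request URI below.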
func main() { uri := primary.LocalAPIURI // The following IDs are placeholders and should be replaced with real values diff --git a/plugin/evm/validators/interfaces/noop_state.go b/plugin/evm/validators/interfaces/noop_state.go index 23bb4191dd..c5a49475e2 100644 --- a/plugin/evm/validators/interfaces/noop_state.go +++ b/plugin/evm/validators/interfaces/noop_state.go @@ -17,13 +17,30 @@ func (n *noOpState) GetValidationIDs() set.Set[ids.ID] { return set.NewSet[ids.I func (n *noOpState) GetNodeIDs() set.Set[ids.NodeID] { return set.NewSet[ids.NodeID](0) } -func (n *noOpState) GetValidator(nodeID ids.NodeID) (Validator, error) { +func (n *noOpState) GetValidator(vID ids.ID) (Validator, error) { return Validator{}, nil } func (n *noOpState) GetNodeID(vID ids.ID) (ids.NodeID, error) { return ids.NodeID{}, nil } -func (n *noOpState) GetSubnetID(chainID ids.ID) (ids.ID, error) { return ids.ID{}, nil } +func (n *noOpState) AddValidator(vdr Validator) error { + return nil +} + +func (n *noOpState) UpdateValidator(vdr Validator) error { + return nil +} + +func (n *noOpState) DeleteValidator(vID ids.ID) error { + return nil +} +func (n *noOpState) WriteState() error { return nil } + +func (n *noOpState) SetStatus(vID ids.ID, isActive bool) error { return nil } + +func (n *noOpState) SetWeight(vID ids.ID, newWeight uint64) error { return nil } + +func (n *noOpState) RegisterListener(StateCallbackListener) {} func (n *noOpState) GetUptime( nodeID ids.NodeID, @@ -44,16 +61,3 @@ func (n *noOpState) GetStartTime( ) (startTime time.Time, err error) { return time.Time{}, nil } - -func (n *noOpState) AddValidator(vdr Validator) error { - return nil -} - -func (n *noOpState) DeleteValidator(vID ids.ID) error { - return nil -} -func (n *noOpState) WriteState() error { return nil } - -func (n *noOpState) SetStatus(vID ids.ID, isActive bool) error { return nil } - -func (n *noOpState) RegisterListener(StateCallbackListener) {} diff --git a/warp/verifier_backend.go b/warp/verifier_backend.go index 12fd16483d..a73a1d1ea9 100644 --- a/warp/verifier_backend.go +++ b/warp/verifier_backend.go @@ -39,7 +39,7 @@ func (b *backend) Verify(ctx context.Context, unsignedMessage *avalancheWarp.Uns switch p := parsed.(type) { case *payload.AddressedCall: - return b.verifyOffchainAddressCall(p) + return b.verifyOffchainAddressedCall(p) case *payload.Hash: return b.verifyBlockMessage(ctx, p) default: @@ -67,8 +67,8 @@ func (b *backend) verifyBlockMessage(ctx context.Context, blockHashPayload *payl return nil } -// verifyOffchainAddressCall verifies the addressed call message -func (b *backend) verifyOffchainAddressCall(addressedCall *payload.AddressedCall) *common.AppError { +// verifyOffchainAddressedCall verifies the addressed call message +func (b *backend) verifyOffchainAddressedCall(addressedCall *payload.AddressedCall) *common.AppError { // Further, parse the payload to see if it is a known type. 
parsed, err := messages.Parse(addressedCall.Payload) if err != nil { @@ -107,13 +107,14 @@ func (b *backend) verifyUptimeMessage(uptimeMsg *messages.ValidatorUptime) *comm b.stateLock.Lock() defer b.stateLock.Unlock() // first get the validator's nodeID - nodeID, err := b.validatorState.GetNodeID(uptimeMsg.ValidationID) + vdr, err := b.validatorState.GetValidator(uptimeMsg.ValidationID) if err != nil { return &common.AppError{ Code: VerifyErrCode, Message: fmt.Sprintf("failed to get validator for validationID %s: %s", uptimeMsg.ValidationID, err.Error()), } } + nodeID := vdr.NodeID // then get the current uptime currentUptime, _, err := b.uptimeCalculator.CalculateUptime(nodeID) From 424e7a76535270744ba9cc8b01f4cd509971b28b Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 8 Nov 2024 18:00:06 +0300 Subject: [PATCH 87/98] fix linter --- plugin/evm/uptime/pausable_manager_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/evm/uptime/pausable_manager_test.go b/plugin/evm/uptime/pausable_manager_test.go index 73e84fb360..ea7508f450 100644 --- a/plugin/evm/uptime/pausable_manager_test.go +++ b/plugin/evm/uptime/pausable_manager_test.go @@ -230,7 +230,7 @@ func TestPausableManager(t *testing.T) { require.NoError(up.StopTracking([]ids.NodeID{nodeID0})) // Pause after a while - currentTime = addTime(clk, 3*time.Second) + addTime(clk, 3*time.Second) // expectedUptime should increase since we stopped tracking expectedUptime += 3 * time.Second up.OnValidatorStatusUpdated(vID, nodeID0, false) @@ -272,7 +272,7 @@ func TestPausableManager(t *testing.T) { require.NoError(up.StopTracking([]ids.NodeID{nodeID0})) // Resume after a while - currentTime = addTime(clk, 4*time.Second) + addTime(clk, 4*time.Second) // expectedUptime should increase since we stopped tracking expectedUptime += 4 * time.Second up.OnValidatorStatusUpdated(vID, nodeID0, true) From 4fa19b9e2dcbc9a8d60837aa351292bca9f8672a Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 8 Nov 2024 19:00:25 +0300 Subject: [PATCH 88/98] clarify comments --- plugin/evm/uptime/pausable_manager_test.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/plugin/evm/uptime/pausable_manager_test.go b/plugin/evm/uptime/pausable_manager_test.go index ea7508f450..15b4c29c33 100644 --- a/plugin/evm/uptime/pausable_manager_test.go +++ b/plugin/evm/uptime/pausable_manager_test.go @@ -31,6 +31,7 @@ func TestPausableManager(t *testing.T) { // Connect before tracking require.NoError(up.Connect(nodeID0)) addTime(clk, time.Second) + expectedUptime := 1 * time.Second // Pause before tracking up.OnValidatorStatusUpdated(vID, nodeID0, false) @@ -39,13 +40,13 @@ func TestPausableManager(t *testing.T) { // Elapse Time addTime(clk, time.Second) // The node was paused before we started tracking - expectedUptime := 1 * time.Second + expectedUptime += 1 * time.Second // Start tracking require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) + // Uptime here should not increase after start tracking + // since the node is still paused after we started tracking currentTime := addTime(clk, time.Second) - // Uptime should have increased since the node was paused before we started tracking - expectedUptime += 1 * time.Second checkUptime(t, up, nodeID0, expectedUptime, currentTime) // Disconnect @@ -116,12 +117,12 @@ func TestPausableManager(t *testing.T) { // Start tracking addTime(clk, time.Second) + // Uptime should be 1 since the node was paused before we started tracking + expectedUptime := 1 * time.Second 
require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) // Connect and check uptime addTime(clk, 1*time.Second) - // Uptime should be 1 since the node was paused before we started tracking - expectedUptime := 1 * time.Second checkUptime(t, up, nodeID0, expectedUptime, clk.Time()) require.NoError(up.Connect(nodeID0)) From 3d5261de0368b0752a2138c8f0997b1e1122a356 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 8 Nov 2024 20:50:30 +0300 Subject: [PATCH 89/98] update comment --- plugin/evm/uptime/pausable_manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/uptime/pausable_manager.go b/plugin/evm/uptime/pausable_manager.go index 8b81c4264b..6c437dd049 100644 --- a/plugin/evm/uptime/pausable_manager.go +++ b/plugin/evm/uptime/pausable_manager.go @@ -58,7 +58,7 @@ func (p *pausableManager) Disconnect(nodeID ids.NodeID) error { return nil } -// IsConnected returns true if the node with the given ID is connected to the uptime.Manager +// IsConnected returns true if the node with the given ID is connected to this manager // Note: Inner manager may have a different view of the connection status due to pausing func (p *pausableManager) IsConnected(nodeID ids.NodeID) bool { return p.connectedVdrs.Contains(nodeID) From b5db9aea727f9c73ad31944047356db53f1f214e Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Sat, 9 Nov 2024 11:49:15 +0300 Subject: [PATCH 90/98] remove getnodeID --- plugin/evm/validators/state.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/plugin/evm/validators/state.go b/plugin/evm/validators/state.go index 67d7650415..8f8279785c 100644 --- a/plugin/evm/validators/state.go +++ b/plugin/evm/validators/state.go @@ -255,15 +255,6 @@ func (s *state) GetValidator(vID ids.ID) (interfaces.Validator, error) { }, nil } -// GetNodeID returns the node ID for the given validation ID -func (s *state) GetNodeID(vID ids.ID) (ids.NodeID, error) { - data, exists := s.data[vID] - if !exists { - return ids.NodeID{}, database.ErrNotFound - } - return data.NodeID, nil -} - // RegisterListener registers a listener to the state // OnValidatorAdded is called for all current validators on the provided listener before this function returns func (s *state) RegisterListener(listener interfaces.StateCallbackListener) { From a80c028bd981c49773179704ccc34dc8221dc77a Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Sat, 9 Nov 2024 12:30:35 +0300 Subject: [PATCH 91/98] bump to poc branch --- go.mod | 2 +- go.sum | 4 ++-- scripts/versions.sh | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index b329ca1863..e071c1a37d 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22.8 require ( github.com/VictoriaMetrics/fastcache v1.12.1 github.com/antithesishq/antithesis-sdk-go v0.3.8 - github.com/ava-labs/avalanchego v1.11.13-0.20241104091041-0b7a136fba6b + github.com/ava-labs/avalanchego v1.12.0-initial-poc.8.0.20241109084237-90fa892673d1 github.com/cespare/cp v0.1.0 github.com/crate-crypto/go-ipa v0.0.0-20231025140028-3c0104f4b233 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index ca120b2521..7b2663f76c 100644 --- a/go.sum +++ b/go.sum @@ -60,8 +60,8 @@ github.com/antithesishq/antithesis-sdk-go v0.3.8/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= 
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/ava-labs/avalanchego v1.11.13-0.20241104091041-0b7a136fba6b h1:qvjqst/9X/c4tvI6nFzOwoTeRVIOMPAtEf1liDYrlhI= -github.com/ava-labs/avalanchego v1.11.13-0.20241104091041-0b7a136fba6b/go.mod h1:86tO6F1FT8emclUwdQ2WCwAtAerqjm5A4IbV6XxNUyM= +github.com/ava-labs/avalanchego v1.12.0-initial-poc.8.0.20241109084237-90fa892673d1 h1:U+hV28K+PfNP4n69bGUrgNbS446dtWwD3zhGL4HZVPc= +github.com/ava-labs/avalanchego v1.12.0-initial-poc.8.0.20241109084237-90fa892673d1/go.mod h1:86tO6F1FT8emclUwdQ2WCwAtAerqjm5A4IbV6XxNUyM= github.com/ava-labs/coreth v0.13.8 h1:f14X3KgwHl9LwzfxlN6S4bbn5VA2rhEsNnHaRLSTo/8= github.com/ava-labs/coreth v0.13.8/go.mod h1:t3BSv/eQv0AlDPMfEDCMMoD/jq1RkUsbFzQAFg5qBcE= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= diff --git a/scripts/versions.sh b/scripts/versions.sh index 248a2002d8..74c8634ff5 100644 --- a/scripts/versions.sh +++ b/scripts/versions.sh @@ -4,7 +4,7 @@ # shellcheck disable=SC2034 # Don't export them as they're used in the context of other calls -AVALANCHE_VERSION=${AVALANCHE_VERSION:-'0b7a136f'} +AVALANCHE_VERSION=${AVALANCHE_VERSION:-'90fa8926'} GINKGO_VERSION=${GINKGO_VERSION:-'v2.2.0'} # This won't be used, but it's here to make code syncs easier From c39c7e0e6ae669bc156533ce0e2e1eaf253fcfe8 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Tue, 12 Nov 2024 14:44:17 +0300 Subject: [PATCH 92/98] enable validators API by default --- plugin/evm/config.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugin/evm/config.go b/plugin/evm/config.go index f911df75ce..1daa9339a7 100644 --- a/plugin/evm/config.go +++ b/plugin/evm/config.go @@ -62,6 +62,7 @@ const ( defaultStateSyncMinBlocks = 300_000 defaultStateSyncRequestSize = 1024 // the number of key/values to ask peers for per request defaultDBType = pebbledb.Name + defaultValidatorAPIEnabled = true ) type PBool bool @@ -298,6 +299,7 @@ func (c *Config) SetDefaults() { c.AllowUnprotectedTxHashes = defaultAllowUnprotectedTxHashes c.AcceptedCacheSize = defaultAcceptedCacheSize c.DatabaseType = defaultDBType + c.ValidatorsAPIEnabled = defaultValidatorAPIEnabled } func (d *Duration) UnmarshalJSON(data []byte) (err error) { From 07205f47d746455050de91c3d1f4c88e8bb08e24 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 15 Nov 2024 01:05:19 +0300 Subject: [PATCH 93/98] refactor uptime tracking --- plugin/evm/service.go | 8 +- .../evm/validators/interfaces/interfaces.go | 23 ++ .../evm/validators/interfaces/noop_state.go | 63 --- plugin/evm/validators/manager.go | 134 +++++++ plugin/evm/validators/manager_test.go | 221 +++++++++++ plugin/evm/validators/{ => state}/codec.go | 2 +- .../{ => state}/interfaces/mock_listener.go | 0 .../interfaces/state.go} | 19 +- plugin/evm/validators/{ => state}/state.go | 4 +- .../evm/validators/{ => state}/state_test.go | 4 +- .../uptime/interfaces/interface.go | 4 +- .../uptime/pausable_manager.go | 2 +- .../uptime/pausable_manager_test.go | 2 +- plugin/evm/vm.go | 133 +------ plugin/evm/vm_test.go | 143 +++++++ plugin/evm/vm_validators_state_test.go | 363 ------------------ warp/backend.go | 16 +- 
warp/backend_test.go | 14 +- warp/handlers/signature_request_test.go | 7 +- warp/verifier_backend.go | 8 +- warp/verifier_backend_test.go | 22 +- warp/warptest/noop_validator_reader.go | 42 ++ 22 files changed, 637 insertions(+), 597 deletions(-) create mode 100644 plugin/evm/validators/interfaces/interfaces.go delete mode 100644 plugin/evm/validators/interfaces/noop_state.go create mode 100644 plugin/evm/validators/manager.go create mode 100644 plugin/evm/validators/manager_test.go rename plugin/evm/validators/{ => state}/codec.go (96%) rename plugin/evm/validators/{ => state}/interfaces/mock_listener.go (100%) rename plugin/evm/validators/{interfaces/interface.go => state/interfaces/state.go} (97%) rename plugin/evm/validators/{ => state}/state.go (99%) rename plugin/evm/validators/{ => state}/state_test.go (98%) rename plugin/evm/{ => validators}/uptime/interfaces/interface.go (67%) rename plugin/evm/{ => validators}/uptime/pausable_manager.go (98%) rename plugin/evm/{ => validators}/uptime/pausable_manager_test.go (99%) delete mode 100644 plugin/evm/vm_validators_state_test.go create mode 100644 warp/warptest/noop_validator_reader.go diff --git a/plugin/evm/service.go b/plugin/evm/service.go index 3b6959ee59..98244d7a9b 100644 --- a/plugin/evm/service.go +++ b/plugin/evm/service.go @@ -37,19 +37,19 @@ func (api *ValidatorsAPI) GetCurrentValidators(_ *http.Request, _ *struct{}, rep api.vm.ctx.Lock.RLock() defer api.vm.ctx.Lock.RUnlock() - vIDs := api.vm.validatorState.GetValidationIDs() + vIDs := api.vm.validatorsManager.GetValidationIDs() reply.Validators = make([]CurrentValidator, 0, vIDs.Len()) for _, vID := range vIDs.List() { - validator, err := api.vm.validatorState.GetValidator(vID) + validator, err := api.vm.validatorsManager.GetValidator(vID) if err != nil { return err } - isConnected := api.vm.uptimeManager.IsConnected(validator.NodeID) + isConnected := api.vm.validatorsManager.IsConnected(validator.NodeID) - uptime, _, err := api.vm.uptimeManager.CalculateUptime(validator.NodeID) + uptime, _, err := api.vm.validatorsManager.CalculateUptime(validator.NodeID) if err != nil { return err } diff --git a/plugin/evm/validators/interfaces/interfaces.go b/plugin/evm/validators/interfaces/interfaces.go new file mode 100644 index 0000000000..338833135c --- /dev/null +++ b/plugin/evm/validators/interfaces/interfaces.go @@ -0,0 +1,23 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package interfaces + +import ( + "context" + + avalancheuptime "github.com/ava-labs/avalanchego/snow/uptime" + stateinterfaces "github.com/ava-labs/subnet-evm/plugin/evm/validators/state/interfaces" +) + +type ValidatorReader interface { + stateinterfaces.StateReader + avalancheuptime.Calculator +} + +type Manager interface { + stateinterfaces.State + avalancheuptime.Manager + Sync(ctx context.Context) error + DispatchSync(ctx context.Context) +} diff --git a/plugin/evm/validators/interfaces/noop_state.go b/plugin/evm/validators/interfaces/noop_state.go deleted file mode 100644 index c5a49475e2..0000000000 --- a/plugin/evm/validators/interfaces/noop_state.go +++ /dev/null @@ -1,63 +0,0 @@ -package interfaces - -import ( - "time" - - ids "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" -) - -var NoOpState State = &noOpState{} - -type noOpState struct{} - -func (n *noOpState) GetStatus(vID ids.ID) (bool, error) { return false, nil } - -func (n *noOpState) GetValidationIDs() set.Set[ids.ID] { return set.NewSet[ids.ID](0) } - -func (n *noOpState) GetNodeIDs() set.Set[ids.NodeID] { return set.NewSet[ids.NodeID](0) } - -func (n *noOpState) GetValidator(vID ids.ID) (Validator, error) { - return Validator{}, nil -} - -func (n *noOpState) GetNodeID(vID ids.ID) (ids.NodeID, error) { return ids.NodeID{}, nil } - -func (n *noOpState) AddValidator(vdr Validator) error { - return nil -} - -func (n *noOpState) UpdateValidator(vdr Validator) error { - return nil -} - -func (n *noOpState) DeleteValidator(vID ids.ID) error { - return nil -} -func (n *noOpState) WriteState() error { return nil } - -func (n *noOpState) SetStatus(vID ids.ID, isActive bool) error { return nil } - -func (n *noOpState) SetWeight(vID ids.ID, newWeight uint64) error { return nil } - -func (n *noOpState) RegisterListener(StateCallbackListener) {} - -func (n *noOpState) GetUptime( - nodeID ids.NodeID, -) (upDuration time.Duration, lastUpdated time.Time, err error) { - return 0, time.Time{}, nil -} - -func (n *noOpState) SetUptime( - nodeID ids.NodeID, - upDuration time.Duration, - lastUpdated time.Time, -) error { - return nil -} - -func (n *noOpState) GetStartTime( - nodeID ids.NodeID, -) (startTime time.Time, err error) { - return time.Time{}, nil -} diff --git a/plugin/evm/validators/manager.go b/plugin/evm/validators/manager.go new file mode 100644 index 0000000000..1cc6f24c30 --- /dev/null +++ b/plugin/evm/validators/manager.go @@ -0,0 +1,134 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package validators + +import ( + "context" + "fmt" + "time" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + avalancheuptime "github.com/ava-labs/avalanchego/snow/uptime" + avalanchevalidators "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" + validators "github.com/ava-labs/subnet-evm/plugin/evm/validators/state" + stateinterfaces "github.com/ava-labs/subnet-evm/plugin/evm/validators/state/interfaces" + "github.com/ava-labs/subnet-evm/plugin/evm/validators/uptime" + uptimeinterfaces "github.com/ava-labs/subnet-evm/plugin/evm/validators/uptime/interfaces" + + "github.com/ethereum/go-ethereum/log" +) + +const ( + SyncFrequency = 1 * time.Minute +) + +type manager struct { + chainCtx *snow.Context + stateinterfaces.State + uptimeinterfaces.PausableManager +} + +func NewManager( + ctx *snow.Context, + db database.Database, + clock *mockable.Clock, +) (interfaces.Manager, error) { + validatorState, err := validators.NewState(db) + if err != nil { + return nil, fmt.Errorf("failed to initialize validator state: %w", err) + } + + // Initialize uptime manager + uptimeManager := uptime.NewPausableManager(avalancheuptime.NewManager(validatorState, clock)) + validatorState.RegisterListener(uptimeManager) + + return &manager{ + chainCtx: ctx, + State: validatorState, + PausableManager: uptimeManager, + }, nil +} + +func (m *manager) DispatchSync(ctx context.Context) { + ticker := time.NewTicker(SyncFrequency) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + m.chainCtx.Lock.Lock() + if err := m.Sync(ctx); err != nil { + log.Error("failed to update validators", "error", err) + } + m.chainCtx.Lock.Unlock() + case <-ctx.Done(): + return + } + } +} + +// Sync synchronizes the validator state with the current validator set +// and writes the state to the database. 
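+// Sync does not take the chain context lock itself; DispatchSync holds it around each call.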
+func (m *manager) Sync(ctx context.Context) error { + now := time.Now() + log.Debug("performing validator sync") + // get current validator set + currentValidatorSet, _, err := m.chainCtx.ValidatorState.GetCurrentValidatorSet(ctx, m.chainCtx.SubnetID) + if err != nil { + return fmt.Errorf("failed to get current validator set: %w", err) + } + + // load the current validator set into the validator state + if err := loadValidators(m.State, currentValidatorSet); err != nil { + return fmt.Errorf("failed to load current validators: %w", err) + } + + // write validators to the database + if err := m.State.WriteState(); err != nil { + return fmt.Errorf("failed to write validator state: %w", err) + } + + // TODO: add metrics + log.Debug("validator sync complete", "duration", time.Since(now)) + return nil +} + +// loadValidators loads the [validators] into the validator state [validatorState] +func loadValidators(validatorState stateinterfaces.State, newValidators map[ids.ID]*avalanchevalidators.GetCurrentValidatorOutput) error { + currentValidationIDs := validatorState.GetValidationIDs() + // first check if we need to delete any existing validators + for vID := range currentValidationIDs { + // if the validator is not in the new set of validators + // delete the validator + if _, exists := newValidators[vID]; !exists { + validatorState.DeleteValidator(vID) + } + } + + // then load the new validators + for newVID, newVdr := range newValidators { + currentVdr := stateinterfaces.Validator{ + ValidationID: newVID, + NodeID: newVdr.NodeID, + Weight: newVdr.Weight, + StartTimestamp: newVdr.StartTime, + IsActive: newVdr.IsActive, + IsSoV: newVdr.IsSoV, + } + if currentValidationIDs.Contains(newVID) { + if err := validatorState.UpdateValidator(currentVdr); err != nil { + return err + } + } else { + if err := validatorState.AddValidator(currentVdr); err != nil { + return err + } + } + } + return nil +} diff --git a/plugin/evm/validators/manager_test.go b/plugin/evm/validators/manager_test.go new file mode 100644 index 0000000000..282488f68f --- /dev/null +++ b/plugin/evm/validators/manager_test.go @@ -0,0 +1,221 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package validators + +import ( + "testing" + + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/subnet-evm/plugin/evm/validators/state" + "github.com/ava-labs/subnet-evm/plugin/evm/validators/state/interfaces" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + avagovalidators "github.com/ava-labs/avalanchego/snow/validators" +) + +func TestLoadNewValidators(t *testing.T) { + testNodeIDs := []ids.NodeID{ + ids.GenerateTestNodeID(), + ids.GenerateTestNodeID(), + ids.GenerateTestNodeID(), + } + testValidationIDs := []ids.ID{ + ids.GenerateTestID(), + ids.GenerateTestID(), + ids.GenerateTestID(), + } + tests := []struct { + name string + initialValidators map[ids.ID]*avagovalidators.GetCurrentValidatorOutput + newValidators map[ids.ID]*avagovalidators.GetCurrentValidatorOutput + registerMockListenerCalls func(*interfaces.MockStateCallbackListener) + expectedLoadErr error + }{ + { + name: "before empty/after empty", + initialValidators: map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{}, + newValidators: map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{}, + registerMockListenerCalls: func(*interfaces.MockStateCallbackListener) {}, + }, + { + name: "before empty/after one", + initialValidators: map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{}, + newValidators: map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + IsActive: true, + StartTime: 0, + }, + }, + registerMockListenerCalls: func(mock *interfaces.MockStateCallbackListener) { + mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) + }, + }, + { + name: "before one/after empty", + initialValidators: map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + IsActive: true, + StartTime: 0, + }, + }, + newValidators: map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{}, + registerMockListenerCalls: func(mock *interfaces.MockStateCallbackListener) { + // initial validator will trigger first + mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) + // then it will be removed + mock.EXPECT().OnValidatorRemoved(testValidationIDs[0], testNodeIDs[0]).Times(1) + }, + }, + { + name: "no change", + initialValidators: map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + IsActive: true, + StartTime: 0, + }, + }, + newValidators: map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + IsActive: true, + StartTime: 0, + }, + }, + registerMockListenerCalls: func(mock *interfaces.MockStateCallbackListener) { + mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) + }, + }, + { + name: "status and weight change and new one", + initialValidators: map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + IsActive: true, + StartTime: 0, + Weight: 1, + }, + }, + newValidators: map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + IsActive: false, + StartTime: 0, + Weight: 2, + }, + testValidationIDs[1]: { + NodeID: testNodeIDs[1], + IsActive: true, + StartTime: 0, + }, + }, + registerMockListenerCalls: func(mock 
*interfaces.MockStateCallbackListener) { + // initial validator will trigger first + mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) + // then it will be updated + mock.EXPECT().OnValidatorStatusUpdated(testValidationIDs[0], testNodeIDs[0], false).Times(1) + // new validator will be added + mock.EXPECT().OnValidatorAdded(testValidationIDs[1], testNodeIDs[1], uint64(0), true).Times(1) + }, + }, + { + name: "renew validation ID", + initialValidators: map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + IsActive: true, + StartTime: 0, + }, + }, + newValidators: map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{ + testValidationIDs[1]: { + NodeID: testNodeIDs[0], + IsActive: true, + StartTime: 0, + }, + }, + registerMockListenerCalls: func(mock *interfaces.MockStateCallbackListener) { + // initial validator will trigger first + mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) + // then it will be removed + mock.EXPECT().OnValidatorRemoved(testValidationIDs[0], testNodeIDs[0]).Times(1) + // new validator will be added + mock.EXPECT().OnValidatorAdded(testValidationIDs[1], testNodeIDs[0], uint64(0), true).Times(1) + }, + }, + { + name: "renew node ID", + initialValidators: map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + IsActive: true, + StartTime: 0, + }, + }, + newValidators: map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[1], + IsActive: true, + StartTime: 0, + }, + }, + expectedLoadErr: state.ErrImmutableField, + registerMockListenerCalls: func(mock *interfaces.MockStateCallbackListener) { + // initial validator will trigger first + mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) + // then it won't be called since we don't track the node ID changes + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + require := require.New(tt) + db := memdb.New() + validatorState, err := state.NewState(db) + require.NoError(err) + + // set initial validators + for vID, validator := range test.initialValidators { + err := validatorState.AddValidator(interfaces.Validator{ + ValidationID: vID, + NodeID: validator.NodeID, + Weight: validator.Weight, + StartTimestamp: validator.StartTime, + IsActive: validator.IsActive, + IsSoV: validator.IsSoV, + }) + require.NoError(err) + } + // enable mock listener + ctrl := gomock.NewController(tt) + mockListener := interfaces.NewMockStateCallbackListener(ctrl) + test.registerMockListenerCalls(mockListener) + + validatorState.RegisterListener(mockListener) + // load new validators + err = loadValidators(validatorState, test.newValidators) + if test.expectedLoadErr != nil { + require.Error(err) + return + } + require.NoError(err) + // check if the state is as expected + require.Equal(len(test.newValidators), validatorState.GetValidationIDs().Len()) + for vID, validator := range test.newValidators { + v, err := validatorState.GetValidator(vID) + require.NoError(err) + require.Equal(validator.NodeID, v.NodeID) + require.Equal(validator.Weight, v.Weight) + require.Equal(validator.StartTime, v.StartTimestamp) + require.Equal(validator.IsActive, v.IsActive) + require.Equal(validator.IsSoV, v.IsSoV) + } + }) + } +} diff --git a/plugin/evm/validators/codec.go b/plugin/evm/validators/state/codec.go similarity index 96% rename from 
plugin/evm/validators/codec.go rename to plugin/evm/validators/state/codec.go index dadba8b273..aeb1a683b2 100644 --- a/plugin/evm/validators/codec.go +++ b/plugin/evm/validators/state/codec.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package validators +package state import ( "math" diff --git a/plugin/evm/validators/interfaces/mock_listener.go b/plugin/evm/validators/state/interfaces/mock_listener.go similarity index 100% rename from plugin/evm/validators/interfaces/mock_listener.go rename to plugin/evm/validators/state/interfaces/mock_listener.go diff --git a/plugin/evm/validators/interfaces/interface.go b/plugin/evm/validators/state/interfaces/state.go similarity index 97% rename from plugin/evm/validators/interfaces/interface.go rename to plugin/evm/validators/state/interfaces/state.go index 39b6b8c9e9..9dea519798 100644 --- a/plugin/evm/validators/interfaces/interface.go +++ b/plugin/evm/validators/state/interfaces/state.go @@ -11,24 +11,27 @@ import ( "github.com/ava-labs/avalanchego/utils/set" ) +type StateReader interface { + // GetValidator returns the validator data for the given validation ID + GetValidator(vID ids.ID) (Validator, error) + // GetValidationIDs returns the validation IDs in the state + GetValidationIDs() set.Set[ids.ID] + // GetNodeIDs returns the validator node IDs in the state + GetNodeIDs() set.Set[ids.NodeID] +} + type State interface { uptime.State + StateReader // AddValidator adds a new validator to the state AddValidator(vdr Validator) error // UpdateValidator updates the validator in the state UpdateValidator(vdr Validator) error - // GetValidator returns the validator data for the given validation ID - GetValidator(vID ids.ID) (Validator, error) // DeleteValidator deletes the validator from the state DeleteValidator(vID ids.ID) error + // WriteState writes the validator state to the disk WriteState() error - - // GetValidationIDs returns the validation IDs in the state - GetValidationIDs() set.Set[ids.ID] - // GetNodeIDs returns the validator node IDs in the state - GetNodeIDs() set.Set[ids.NodeID] - // RegisterListener registers a listener to the state RegisterListener(StateCallbackListener) } diff --git a/plugin/evm/validators/state.go b/plugin/evm/validators/state/state.go similarity index 99% rename from plugin/evm/validators/state.go rename to plugin/evm/validators/state/state.go index 8f8279785c..4c62e90c29 100644 --- a/plugin/evm/validators/state.go +++ b/plugin/evm/validators/state/state.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package validators +package state import ( "fmt" @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" + "github.com/ava-labs/subnet-evm/plugin/evm/validators/state/interfaces" ) var _ uptime.State = &state{} diff --git a/plugin/evm/validators/state_test.go b/plugin/evm/validators/state/state_test.go similarity index 98% rename from plugin/evm/validators/state_test.go rename to plugin/evm/validators/state/state_test.go index 2184a6fbbc..4581b3804e 100644 --- a/plugin/evm/validators/state_test.go +++ b/plugin/evm/validators/state/state_test.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package validators +package state import ( "testing" @@ -15,7 +15,7 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" + "github.com/ava-labs/subnet-evm/plugin/evm/validators/state/interfaces" ) func TestState(t *testing.T) { diff --git a/plugin/evm/uptime/interfaces/interface.go b/plugin/evm/validators/uptime/interfaces/interface.go similarity index 67% rename from plugin/evm/uptime/interfaces/interface.go rename to plugin/evm/validators/uptime/interfaces/interface.go index 13e6b7abba..296daae314 100644 --- a/plugin/evm/uptime/interfaces/interface.go +++ b/plugin/evm/validators/uptime/interfaces/interface.go @@ -6,11 +6,11 @@ package interfaces import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/uptime" - validatorsinterfaces "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" + validatorsstateinterfaces "github.com/ava-labs/subnet-evm/plugin/evm/validators/state/interfaces" ) type PausableManager interface { uptime.Manager - validatorsinterfaces.StateCallbackListener + validatorsstateinterfaces.StateCallbackListener IsPaused(nodeID ids.NodeID) bool } diff --git a/plugin/evm/uptime/pausable_manager.go b/plugin/evm/validators/uptime/pausable_manager.go similarity index 98% rename from plugin/evm/uptime/pausable_manager.go rename to plugin/evm/validators/uptime/pausable_manager.go index 6c437dd049..a715c54257 100644 --- a/plugin/evm/uptime/pausable_manager.go +++ b/plugin/evm/validators/uptime/pausable_manager.go @@ -6,7 +6,7 @@ package uptime import ( "errors" - "github.com/ava-labs/subnet-evm/plugin/evm/uptime/interfaces" + "github.com/ava-labs/subnet-evm/plugin/evm/validators/uptime/interfaces" "github.com/ethereum/go-ethereum/log" "github.com/ava-labs/avalanchego/ids" diff --git a/plugin/evm/uptime/pausable_manager_test.go b/plugin/evm/validators/uptime/pausable_manager_test.go similarity index 99% rename from plugin/evm/uptime/pausable_manager_test.go rename to plugin/evm/validators/uptime/pausable_manager_test.go index 15b4c29c33..53fec7bf39 100644 --- a/plugin/evm/uptime/pausable_manager_test.go +++ b/plugin/evm/validators/uptime/pausable_manager_test.go @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/avalanchego/ids" 
"github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/subnet-evm/plugin/evm/uptime/interfaces" + "github.com/ava-labs/subnet-evm/plugin/evm/validators/uptime/interfaces" "github.com/stretchr/testify/require" ) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index e9323f922f..d9d3a12a7f 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -41,10 +41,8 @@ import ( "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/peer" "github.com/ava-labs/subnet-evm/plugin/evm/message" - "github.com/ava-labs/subnet-evm/plugin/evm/uptime" - uptimeinterfaces "github.com/ava-labs/subnet-evm/plugin/evm/uptime/interfaces" "github.com/ava-labs/subnet-evm/plugin/evm/validators" - validatorsinterfaces "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" + "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" "github.com/ava-labs/subnet-evm/triedb" "github.com/ava-labs/subnet-evm/triedb/hashdb" @@ -84,8 +82,6 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - avalancheUptime "github.com/ava-labs/avalanchego/snow/uptime" - avalancheValidators "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -136,8 +132,6 @@ const ( txGossipThrottlingPeriod = 10 * time.Second txGossipThrottlingLimit = 2 txGossipPollSize = 1 - - loadValidatorsFrequency = 1 * time.Minute ) // Define the API endpoints for the VM @@ -247,7 +241,7 @@ type VM struct { client peer.NetworkClient networkCodec codec.Manager - validators *p2p.Validators + p2pValidators *p2p.Validators // Metrics sdkMetrics *prometheus.Registry @@ -269,8 +263,7 @@ type VM struct { ethTxPushGossiper avalancheUtils.Atomic[*gossip.PushGossiper[*GossipEthTx]] ethTxPullGossiper gossip.Gossiper - uptimeManager uptimeinterfaces.PausableManager - validatorState validatorsinterfaces.State + validatorsManager interfaces.Manager chainAlias string // RPC handlers (should be stopped before closing chaindb) @@ -502,20 +495,16 @@ func (vm *VM) Initialize( if err != nil { return fmt.Errorf("failed to initialize p2p network: %w", err) } - vm.validators = p2p.NewValidators(p2pNetwork.Peers, vm.ctx.Log, vm.ctx.SubnetID, vm.ctx.ValidatorState, maxValidatorSetStaleness) + vm.p2pValidators = p2p.NewValidators(p2pNetwork.Peers, vm.ctx.Log, vm.ctx.SubnetID, vm.ctx.ValidatorState, maxValidatorSetStaleness) vm.networkCodec = message.Codec vm.Network = peer.NewNetwork(p2pNetwork, appSender, vm.networkCodec, chainCtx.NodeID, vm.config.MaxOutboundActiveRequests) vm.client = peer.NewNetworkClient(vm.Network) - vm.validatorState, err = validators.NewState(vm.validatorsDB) + vm.validatorsManager, err = validators.NewManager(vm.ctx, vm.validatorsDB, &vm.clock) if err != nil { - return fmt.Errorf("failed to initialize validator state: %w", err) + return fmt.Errorf("failed to initialize validators manager: %w", err) } - // Initialize uptime 
manager - vm.uptimeManager = uptime.NewPausableManager(avalancheUptime.NewManager(vm.validatorState, &vm.clock)) - vm.validatorState.RegisterListener(vm.uptimeManager) - // Initialize warp backend offchainWarpMessages := make([][]byte, len(vm.config.WarpOffChainMessages)) for i, hexMsg := range vm.config.WarpOffChainMessages { @@ -539,8 +528,7 @@ func (vm *VM) Initialize( vm.ctx.ChainID, vm.ctx.WarpSigner, vm, - vm.uptimeManager, - vm.validatorState, + vm.validatorsManager, vm.ctx.Lock.RLocker(), vm.warpDB, meteredCache, @@ -740,26 +728,26 @@ func (vm *VM) onNormalOperationsStarted() error { ctx, cancel := context.WithCancel(context.TODO()) vm.cancel = cancel - // update validators first - if err := vm.performValidatorUpdate(ctx); err != nil { + // sync validators first + if err := vm.validatorsManager.Sync(ctx); err != nil { return fmt.Errorf("failed to update validators: %w", err) } - vdrIDs := vm.validatorState.GetNodeIDs().List() + vdrIDs := vm.validatorsManager.GetNodeIDs().List() // then start tracking with updated validators - if err := vm.uptimeManager.StartTracking(vdrIDs); err != nil { + if err := vm.validatorsManager.StartTracking(vdrIDs); err != nil { return fmt.Errorf("failed to start tracking uptime: %w", err) } // dispatch validator set update vm.shutdownWg.Add(1) go func() { - vm.dispatchUpdateValidators(ctx) + vm.validatorsManager.DispatchSync(ctx) vm.shutdownWg.Done() }() // Initialize goroutines related to block building // once we enter normal operation as there is no need to handle mempool gossip before this point. ethTxGossipMarshaller := GossipEthTxMarshaller{} - ethTxGossipClient := vm.Network.NewClient(p2p.TxGossipHandlerID, p2p.WithValidatorSampling(vm.validators)) + ethTxGossipClient := vm.Network.NewClient(p2p.TxGossipHandlerID, p2p.WithValidatorSampling(vm.p2pValidators)) ethTxGossipMetrics, err := gossip.NewMetrics(vm.sdkMetrics, ethTxGossipNamespace) if err != nil { return fmt.Errorf("failed to initialize eth tx gossip metrics: %w", err) @@ -789,7 +777,7 @@ func (vm *VM) onNormalOperationsStarted() error { ethTxPushGossiper, err = gossip.NewPushGossiper[*GossipEthTx]( ethTxGossipMarshaller, ethTxPool, - vm.validators, + vm.p2pValidators, ethTxGossipClient, ethTxGossipMetrics, pushGossipParams, @@ -819,7 +807,7 @@ func (vm *VM) onNormalOperationsStarted() error { txGossipTargetMessageSize, txGossipThrottlingPeriod, txGossipThrottlingLimit, - vm.validators, + vm.p2pValidators, ) } @@ -840,7 +828,7 @@ func (vm *VM) onNormalOperationsStarted() error { vm.ethTxPullGossiper = gossip.ValidatorGossiper{ Gossiper: ethTxPullGossiper, NodeID: vm.ctx.NodeID, - Validators: vm.validators, + Validators: vm.p2pValidators, } } @@ -885,11 +873,11 @@ func (vm *VM) Shutdown(context.Context) error { vm.cancel() } if vm.bootstrapped.Get() { - vdrIDs := vm.validatorState.GetNodeIDs().List() - if err := vm.uptimeManager.StopTracking(vdrIDs); err != nil { + vdrIDs := vm.validatorsManager.GetNodeIDs().List() + if err := vm.validatorsManager.StopTracking(vdrIDs); err != nil { return fmt.Errorf("failed to stop tracking uptime: %w", err) } - if err := vm.validatorState.WriteState(); err != nil { + if err := vm.validatorsManager.WriteState(); err != nil { return fmt.Errorf("failed to write validator: %w", err) } } @@ -1421,95 +1409,16 @@ func (vm *VM) createDatabase(dbConfig avalancheNode.DatabaseConfig) (database.Da } func (vm *VM) Connected(ctx context.Context, nodeID ids.NodeID, version *version.Application) error { - if err := vm.uptimeManager.Connect(nodeID); err != nil { + if err := 
vm.validatorsManager.Connect(nodeID); err != nil { return fmt.Errorf("uptime manager failed to connect node %s: %w", nodeID, err) } return vm.Network.Connected(ctx, nodeID, version) } func (vm *VM) Disconnected(ctx context.Context, nodeID ids.NodeID) error { - if err := vm.uptimeManager.Disconnect(nodeID); err != nil { + if err := vm.validatorsManager.Disconnect(nodeID); err != nil { return fmt.Errorf("uptime manager failed to disconnect node %s: %w", nodeID, err) } return vm.Network.Disconnected(ctx, nodeID) } - -func (vm *VM) dispatchUpdateValidators(ctx context.Context) { - ticker := time.NewTicker(loadValidatorsFrequency) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - vm.ctx.Lock.Lock() - if err := vm.performValidatorUpdate(ctx); err != nil { - log.Error("failed to update validators", "error", err) - } - vm.ctx.Lock.Unlock() - case <-ctx.Done(): - return - } - } -} - -// performValidatorUpdate updates the validator state with the current validator set -// and writes the state to the database. -func (vm *VM) performValidatorUpdate(ctx context.Context) error { - now := time.Now() - log.Debug("performing validator update") - // get current validator set - currentValidatorSet, _, err := vm.ctx.ValidatorState.GetCurrentValidatorSet(ctx, vm.ctx.SubnetID) - if err != nil { - return fmt.Errorf("failed to get current validator set: %w", err) - } - - // load the current validator set into the validator state - if err := loadValidators(vm.validatorState, currentValidatorSet); err != nil { - return fmt.Errorf("failed to load current validators: %w", err) - } - - // write validators to the database - if err := vm.validatorState.WriteState(); err != nil { - return fmt.Errorf("failed to write validator state: %w", err) - } - - // TODO: add metrics - log.Debug("validator update complete", "duration", time.Since(now)) - return nil -} - -// loadValidators loads the [validators] into the validator state [validatorState] -func loadValidators(validatorState validatorsinterfaces.State, newValidators map[ids.ID]*avalancheValidators.GetCurrentValidatorOutput) error { - currentValidationIDs := validatorState.GetValidationIDs() - // first check if we need to delete any existing validators - for vID := range currentValidationIDs { - // if the validator is not in the new set of validators - // delete the validator - if _, exists := newValidators[vID]; !exists { - validatorState.DeleteValidator(vID) - } - } - - // then load the new validators - for newVID, newVdr := range newValidators { - currentVdr := validatorsinterfaces.Validator{ - ValidationID: newVID, - NodeID: newVdr.NodeID, - Weight: newVdr.Weight, - StartTimestamp: newVdr.StartTime, - IsActive: newVdr.IsActive, - IsSoV: newVdr.IsSoV, - } - if currentValidationIDs.Contains(newVID) { - if err := validatorState.UpdateValidator(currentVdr); err != nil { - return err - } - } else { - if err := validatorState.AddValidator(currentVdr); err != nil { - return err - } - } - } - return nil -} diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 42e525d656..ee1204f84d 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -21,6 +21,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/api/keystore" @@ -33,6 +34,7 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" 
commonEng "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/enginetest" + "github.com/ava-labs/avalanchego/snow/validators/validatorstest" "github.com/ava-labs/avalanchego/upgrade" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/logging" @@ -48,6 +50,7 @@ import ( "github.com/ava-labs/subnet-evm/eth" "github.com/ava-labs/subnet-evm/metrics" "github.com/ava-labs/subnet-evm/params" + "github.com/ava-labs/subnet-evm/plugin/evm/validators" "github.com/ava-labs/subnet-evm/precompile/allowlist" "github.com/ava-labs/subnet-evm/precompile/contracts/deployerallowlist" "github.com/ava-labs/subnet-evm/precompile/contracts/feemanager" @@ -58,6 +61,7 @@ import ( "github.com/ava-labs/subnet-evm/utils" "github.com/ava-labs/subnet-evm/vmerrs" + avagovalidators "github.com/ava-labs/avalanchego/snow/validators" avagoconstants "github.com/ava-labs/avalanchego/utils/constants" ) @@ -3111,3 +3115,142 @@ func TestParentBeaconRootBlock(t *testing.T) { }) } } + +func TestValidatorState(t *testing.T) { + require := require.New(t) + genesis := &core.Genesis{} + require.NoError(genesis.UnmarshalJSON([]byte(genesisJSONLatest))) + genesisJSON, err := genesis.MarshalJSON() + require.NoError(err) + + vm := &VM{} + ctx, dbManager, genesisBytes, issuer, _ := setupGenesis(t, string(genesisJSON)) + appSender := &enginetest.Sender{T: t} + appSender.CantSendAppGossip = true + testNodeIDs := []ids.NodeID{ + ids.GenerateTestNodeID(), + ids.GenerateTestNodeID(), + ids.GenerateTestNodeID(), + } + testValidationIDs := []ids.ID{ + ids.GenerateTestID(), + ids.GenerateTestID(), + ids.GenerateTestID(), + } + ctx.ValidatorState = &validatorstest.State{ + GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagovalidators.GetCurrentValidatorOutput, uint64, error) { + return map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + PublicKey: nil, + Weight: 1, + }, + testValidationIDs[1]: { + NodeID: testNodeIDs[1], + PublicKey: nil, + Weight: 1, + }, + testValidationIDs[2]: { + NodeID: testNodeIDs[2], + PublicKey: nil, + Weight: 1, + }, + }, 0, nil + }, + } + appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } + err = vm.Initialize( + context.Background(), + ctx, + dbManager, + genesisBytes, + []byte(""), + []byte(""), + issuer, + []*commonEng.Fx{}, + appSender, + ) + require.NoError(err, "error initializing GenesisVM") + + // Test case 1: state should not be populated until bootstrapped + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.Equal(0, vm.validatorsManager.GetValidationIDs().Len()) + _, _, err = vm.validatorsManager.CalculateUptime(testNodeIDs[0]) + require.ErrorIs(database.ErrNotFound, err) + require.False(vm.validatorsManager.StartedTracking()) + + // Test case 2: state should be populated after bootstrapped + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) + require.Len(vm.validatorsManager.GetValidationIDs(), 3) + _, _, err = vm.validatorsManager.CalculateUptime(testNodeIDs[0]) + require.NoError(err) + require.True(vm.validatorsManager.StartedTracking()) + + // Test case 3: 
restarting VM should not lose state + vm.Shutdown(context.Background()) + // Shutdown should stop tracking + require.False(vm.validatorsManager.StartedTracking()) + + vm = &VM{} + err = vm.Initialize( + context.Background(), + utils.TestSnowContext(), // this context does not have validators state, making VM to source it from the database + dbManager, + genesisBytes, + []byte(""), + []byte(""), + issuer, + []*commonEng.Fx{}, + appSender, + ) + require.NoError(err, "error initializing GenesisVM") + require.Len(vm.validatorsManager.GetValidationIDs(), 3) + _, _, err = vm.validatorsManager.CalculateUptime(testNodeIDs[0]) + require.NoError(err) + require.False(vm.validatorsManager.StartedTracking()) + + // Test case 4: new validators should be added to the state + newValidationID := ids.GenerateTestID() + newNodeID := ids.GenerateTestNodeID() + testState := &validatorstest.State{ + GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagovalidators.GetCurrentValidatorOutput, uint64, error) { + return map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + PublicKey: nil, + Weight: 1, + }, + testValidationIDs[1]: { + NodeID: testNodeIDs[1], + PublicKey: nil, + Weight: 1, + }, + testValidationIDs[2]: { + NodeID: testNodeIDs[2], + PublicKey: nil, + Weight: 1, + }, + newValidationID: { + NodeID: newNodeID, + PublicKey: nil, + Weight: 1, + }, + }, 0, nil + }, + } + // set VM as bootstrapped + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) + + vm.ctx.ValidatorState = testState + + // new validator should be added to the state eventually after SyncFrequency + require.EventuallyWithT(func(c *assert.CollectT) { + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + assert.Len(c, vm.validatorsManager.GetNodeIDs(), 4) + newValidator, err := vm.validatorsManager.GetValidator(newValidationID) + assert.NoError(c, err) + assert.Equal(c, newNodeID, newValidator.NodeID) + }, validators.SyncFrequency*2, 5*time.Second) +} diff --git a/plugin/evm/vm_validators_state_test.go b/plugin/evm/vm_validators_state_test.go deleted file mode 100644 index 8bf8c3546f..0000000000 --- a/plugin/evm/vm_validators_state_test.go +++ /dev/null @@ -1,363 +0,0 @@ -package evm - -import ( - "context" - "testing" - "time" - - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - commonEng "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/engine/enginetest" - avagoValidators "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/snow/validators/validatorstest" - "github.com/ava-labs/subnet-evm/core" - "github.com/ava-labs/subnet-evm/plugin/evm/validators" - "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" - "github.com/ava-labs/subnet-evm/utils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" -) - -func TestValidatorState(t *testing.T) { - require := require.New(t) - genesis := &core.Genesis{} - require.NoError(genesis.UnmarshalJSON([]byte(genesisJSONLatest))) - 
genesisJSON, err := genesis.MarshalJSON() - require.NoError(err) - - vm := &VM{} - ctx, dbManager, genesisBytes, issuer, _ := setupGenesis(t, string(genesisJSON)) - appSender := &enginetest.Sender{T: t} - appSender.CantSendAppGossip = true - testNodeIDs := []ids.NodeID{ - ids.GenerateTestNodeID(), - ids.GenerateTestNodeID(), - ids.GenerateTestNodeID(), - } - testValidationIDs := []ids.ID{ - ids.GenerateTestID(), - ids.GenerateTestID(), - ids.GenerateTestID(), - } - ctx.ValidatorState = &validatorstest.State{ - GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagoValidators.GetCurrentValidatorOutput, uint64, error) { - return map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ - testValidationIDs[0]: { - NodeID: testNodeIDs[0], - PublicKey: nil, - Weight: 1, - }, - testValidationIDs[1]: { - NodeID: testNodeIDs[1], - PublicKey: nil, - Weight: 1, - }, - testValidationIDs[2]: { - NodeID: testNodeIDs[2], - PublicKey: nil, - Weight: 1, - }, - }, 0, nil - }, - } - appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } - err = vm.Initialize( - context.Background(), - ctx, - dbManager, - genesisBytes, - []byte(""), - []byte(""), - issuer, - []*commonEng.Fx{}, - appSender, - ) - require.NoError(err, "error initializing GenesisVM") - - // Test case 1: state should not be populated until bootstrapped - require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - require.Equal(0, vm.validatorState.GetValidationIDs().Len()) - _, _, err = vm.uptimeManager.CalculateUptime(testNodeIDs[0]) - require.ErrorIs(database.ErrNotFound, err) - require.False(vm.uptimeManager.StartedTracking()) - - // Test case 2: state should be populated after bootstrapped - require.NoError(vm.SetState(context.Background(), snow.NormalOp)) - require.Len(vm.validatorState.GetValidationIDs(), 3) - _, _, err = vm.uptimeManager.CalculateUptime(testNodeIDs[0]) - require.NoError(err) - require.True(vm.uptimeManager.StartedTracking()) - - // Test case 3: restarting VM should not lose state - vm.Shutdown(context.Background()) - // Shutdown should stop tracking - require.False(vm.uptimeManager.StartedTracking()) - - vm = &VM{} - err = vm.Initialize( - context.Background(), - utils.TestSnowContext(), // this context does not have validators state, making VM to source it from the database - dbManager, - genesisBytes, - []byte(""), - []byte(""), - issuer, - []*commonEng.Fx{}, - appSender, - ) - require.NoError(err, "error initializing GenesisVM") - require.Len(vm.validatorState.GetValidationIDs(), 3) - _, _, err = vm.uptimeManager.CalculateUptime(testNodeIDs[0]) - require.NoError(err) - require.False(vm.uptimeManager.StartedTracking()) - - // Test case 4: new validators should be added to the state - newValidationID := ids.GenerateTestID() - newNodeID := ids.GenerateTestNodeID() - testState := &validatorstest.State{ - GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagoValidators.GetCurrentValidatorOutput, uint64, error) { - return map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ - testValidationIDs[0]: { - NodeID: testNodeIDs[0], - PublicKey: nil, - Weight: 1, - }, - testValidationIDs[1]: { - NodeID: testNodeIDs[1], - PublicKey: nil, - Weight: 1, - }, - testValidationIDs[2]: { - NodeID: testNodeIDs[2], - PublicKey: nil, - Weight: 1, - }, - newValidationID: { - NodeID: newNodeID, - PublicKey: nil, - Weight: 1, - }, - }, 0, nil - }, - } - vm.ctx.ValidatorState = testState - // set VM as bootstrapped - 
require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - require.NoError(vm.SetState(context.Background(), snow.NormalOp)) - - // new validator should be added to the state eventually after validatorsLoadFrequency - require.EventuallyWithT(func(c *assert.CollectT) { - assert.Len(c, vm.validatorState.GetNodeIDs(), 4) - newValidator, err := vm.validatorState.GetValidator(newValidationID) - assert.NoError(c, err) - assert.Equal(c, newNodeID, newValidator.NodeID) - }, loadValidatorsFrequency*2, 5*time.Second) -} - -func TestLoadNewValidators(t *testing.T) { - testNodeIDs := []ids.NodeID{ - ids.GenerateTestNodeID(), - ids.GenerateTestNodeID(), - ids.GenerateTestNodeID(), - } - testValidationIDs := []ids.ID{ - ids.GenerateTestID(), - ids.GenerateTestID(), - ids.GenerateTestID(), - } - tests := []struct { - name string - initialValidators map[ids.ID]*avagoValidators.GetCurrentValidatorOutput - newValidators map[ids.ID]*avagoValidators.GetCurrentValidatorOutput - registerMockListenerCalls func(*interfaces.MockStateCallbackListener) - expectedLoadErr error - }{ - { - name: "before empty/after empty", - initialValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{}, - newValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{}, - registerMockListenerCalls: func(*interfaces.MockStateCallbackListener) {}, - }, - { - name: "before empty/after one", - initialValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{}, - newValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ - testValidationIDs[0]: { - NodeID: testNodeIDs[0], - IsActive: true, - StartTime: 0, - }, - }, - registerMockListenerCalls: func(mock *interfaces.MockStateCallbackListener) { - mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) - }, - }, - { - name: "before one/after empty", - initialValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ - testValidationIDs[0]: { - NodeID: testNodeIDs[0], - IsActive: true, - StartTime: 0, - }, - }, - newValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{}, - registerMockListenerCalls: func(mock *interfaces.MockStateCallbackListener) { - // initial validator will trigger first - mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) - // then it will be removed - mock.EXPECT().OnValidatorRemoved(testValidationIDs[0], testNodeIDs[0]).Times(1) - }, - }, - { - name: "no change", - initialValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ - testValidationIDs[0]: { - NodeID: testNodeIDs[0], - IsActive: true, - StartTime: 0, - }, - }, - newValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ - testValidationIDs[0]: { - NodeID: testNodeIDs[0], - IsActive: true, - StartTime: 0, - }, - }, - registerMockListenerCalls: func(mock *interfaces.MockStateCallbackListener) { - mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) - }, - }, - { - name: "status and weight change and new one", - initialValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ - testValidationIDs[0]: { - NodeID: testNodeIDs[0], - IsActive: true, - StartTime: 0, - Weight: 1, - }, - }, - newValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ - testValidationIDs[0]: { - NodeID: testNodeIDs[0], - IsActive: false, - StartTime: 0, - Weight: 2, - }, - testValidationIDs[1]: { - NodeID: testNodeIDs[1], - IsActive: true, - StartTime: 0, - }, - }, - registerMockListenerCalls: func(mock 
*interfaces.MockStateCallbackListener) { - // initial validator will trigger first - mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) - // then it will be updated - mock.EXPECT().OnValidatorStatusUpdated(testValidationIDs[0], testNodeIDs[0], false).Times(1) - // new validator will be added - mock.EXPECT().OnValidatorAdded(testValidationIDs[1], testNodeIDs[1], uint64(0), true).Times(1) - }, - }, - { - name: "renew validation ID", - initialValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ - testValidationIDs[0]: { - NodeID: testNodeIDs[0], - IsActive: true, - StartTime: 0, - }, - }, - newValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ - testValidationIDs[1]: { - NodeID: testNodeIDs[0], - IsActive: true, - StartTime: 0, - }, - }, - registerMockListenerCalls: func(mock *interfaces.MockStateCallbackListener) { - // initial validator will trigger first - mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) - // then it will be removed - mock.EXPECT().OnValidatorRemoved(testValidationIDs[0], testNodeIDs[0]).Times(1) - // new validator will be added - mock.EXPECT().OnValidatorAdded(testValidationIDs[1], testNodeIDs[0], uint64(0), true).Times(1) - }, - }, - { - name: "renew node ID", - initialValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ - testValidationIDs[0]: { - NodeID: testNodeIDs[0], - IsActive: true, - StartTime: 0, - }, - }, - newValidators: map[ids.ID]*avagoValidators.GetCurrentValidatorOutput{ - testValidationIDs[0]: { - NodeID: testNodeIDs[1], - IsActive: true, - StartTime: 0, - }, - }, - expectedLoadErr: validators.ErrImmutableField, - registerMockListenerCalls: func(mock *interfaces.MockStateCallbackListener) { - // initial validator will trigger first - mock.EXPECT().OnValidatorAdded(testValidationIDs[0], testNodeIDs[0], uint64(0), true).Times(1) - // then it won't be called since we don't track the node ID changes - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - require := require.New(tt) - db := memdb.New() - validatorState, err := validators.NewState(db) - require.NoError(err) - - // set initial validators - for vID, validator := range test.initialValidators { - err := validatorState.AddValidator(interfaces.Validator{ - ValidationID: vID, - NodeID: validator.NodeID, - Weight: validator.Weight, - StartTimestamp: validator.StartTime, - IsActive: validator.IsActive, - IsSoV: validator.IsSoV, - }) - require.NoError(err) - } - // enable mock listener - ctrl := gomock.NewController(tt) - mockListener := interfaces.NewMockStateCallbackListener(ctrl) - test.registerMockListenerCalls(mockListener) - - validatorState.RegisterListener(mockListener) - // load new validators - err = loadValidators(validatorState, test.newValidators) - if test.expectedLoadErr != nil { - require.Error(err) - return - } - require.NoError(err) - // check if the state is as expected - require.Equal(len(test.newValidators), validatorState.GetValidationIDs().Len()) - for vID, validator := range test.newValidators { - v, err := validatorState.GetValidator(vID) - require.NoError(err) - require.Equal(validator.NodeID, v.NodeID) - require.Equal(validator.Weight, v.Weight) - require.Equal(validator.StartTime, v.StartTimestamp) - require.Equal(validator.IsActive, v.IsActive) - require.Equal(validator.IsSoV, v.IsSoV) - } - }) - } -} diff --git a/warp/backend.go b/warp/backend.go index 1b66a9db71..50b4774bec 100644 --- a/warp/backend.go +++ 
b/warp/backend.go @@ -14,7 +14,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p/acp118" "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/snow/uptime" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" @@ -59,9 +58,8 @@ type backend struct { db database.Database warpSigner avalancheWarp.Signer blockClient BlockClient - uptimeCalculator uptime.Calculator - validatorState interfaces.State - stateLock sync.Locker + validatorReader interfaces.ValidatorReader + validatorLock sync.Locker signatureCache cache.Cacher[ids.ID, []byte] messageCache *cache.LRU[ids.ID, *avalancheWarp.UnsignedMessage] offchainAddressedCallMsgs map[ids.ID]*avalancheWarp.UnsignedMessage @@ -74,9 +72,8 @@ func NewBackend( sourceChainID ids.ID, warpSigner avalancheWarp.Signer, blockClient BlockClient, - uptimeCalculator uptime.Calculator, - validatorsState interfaces.State, - stateLock sync.Locker, + validatorReader interfaces.ValidatorReader, + validatorLock sync.Locker, db database.Database, signatureCache cache.Cacher[ids.ID, []byte], offchainMessages [][]byte, @@ -88,9 +85,8 @@ func NewBackend( warpSigner: warpSigner, blockClient: blockClient, signatureCache: signatureCache, - uptimeCalculator: uptimeCalculator, - validatorState: validatorsState, - stateLock: stateLock, + validatorReader: validatorReader, + validatorLock: validatorLock, messageCache: &cache.LRU[ids.ID, *avalancheWarp.UnsignedMessage]{Size: messageCacheSize}, stats: newVerifierStats(), offchainAddressedCallMsgs: make(map[ids.ID]*avalancheWarp.UnsignedMessage), diff --git a/warp/backend_test.go b/warp/backend_test.go index a2610d19d4..916c255b4a 100644 --- a/warp/backend_test.go +++ b/warp/backend_test.go @@ -11,12 +11,10 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/crypto/bls" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" - "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" "github.com/ava-labs/subnet-evm/warp/warptest" "github.com/stretchr/testify/require" ) @@ -47,7 +45,7 @@ func TestAddAndGetValidMessage(t *testing.T) { require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, uptime.NoOpCalculator, interfaces.NoOpState, &sync.RWMutex{}, db, messageSignatureCache, nil) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, warptest.NoOpValidatorReader{}, &sync.RWMutex{}, db, messageSignatureCache, nil) require.NoError(t, err) // Add testUnsignedMessage to the warp backend @@ -70,7 +68,7 @@ func TestAddAndGetUnknownMessage(t *testing.T) { require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, 
sourceChainID) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, uptime.NoOpCalculator, interfaces.NoOpState, &sync.RWMutex{}, db, messageSignatureCache, nil) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, warptest.NoOpValidatorReader{}, &sync.RWMutex{}, db, messageSignatureCache, nil) require.NoError(t, err) // Try getting a signature for a message that was not added. @@ -89,7 +87,7 @@ func TestGetBlockSignature(t *testing.T) { require.NoError(err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, blockClient, uptime.NoOpCalculator, interfaces.NoOpState, &sync.RWMutex{}, db, messageSignatureCache, nil) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, blockClient, warptest.NoOpValidatorReader{}, &sync.RWMutex{}, db, messageSignatureCache, nil) require.NoError(err) blockHashPayload, err := payload.NewHash(blkID) @@ -116,7 +114,7 @@ func TestZeroSizedCache(t *testing.T) { // Verify zero sized cache works normally, because the lru cache will be initialized to size 1 for any size parameter <= 0. messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 0} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, uptime.NoOpCalculator, interfaces.NoOpState, &sync.RWMutex{}, db, messageSignatureCache, nil) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, warptest.NoOpValidatorReader{}, &sync.RWMutex{}, db, messageSignatureCache, nil) require.NoError(t, err) // Add testUnsignedMessage to the warp backend @@ -170,7 +168,7 @@ func TestOffChainMessages(t *testing.T) { db := memdb.New() messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 0} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, uptime.NoOpCalculator, interfaces.NoOpState, &sync.RWMutex{}, db, messageSignatureCache, test.offchainMessages) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, warptest.NoOpValidatorReader{}, &sync.RWMutex{}, db, messageSignatureCache, test.offchainMessages) require.ErrorIs(err, test.err) if test.check != nil { test.check(require, backend) @@ -178,3 +176,5 @@ func TestOffChainMessages(t *testing.T) { }) } } + +type noOpValidatorReader struct{} diff --git a/warp/handlers/signature_request_test.go b/warp/handlers/signature_request_test.go index d27bd1a295..b74b9332c3 100644 --- a/warp/handlers/signature_request_test.go +++ b/warp/handlers/signature_request_test.go @@ -10,12 +10,10 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/utils/crypto/bls" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/subnet-evm/plugin/evm/message" - "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" "github.com/ava-labs/subnet-evm/utils" "github.com/ava-labs/subnet-evm/warp" "github.com/ava-labs/subnet-evm/warp/warptest" @@ -35,7 +33,7 @@ func TestMessageSignatureHandler(t *testing.T) { 
require.NoError(t, err) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 100} - backend, err := warp.NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, uptime.NoOpCalculator, interfaces.NoOpState, snowCtx.Lock.RLocker(), database, messageSignatureCache, [][]byte{offchainMessage.Bytes()}) + backend, err := warp.NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, warptest.NoOpValidatorReader{}, snowCtx.Lock.RLocker(), database, messageSignatureCache, [][]byte{offchainMessage.Bytes()}) require.NoError(t, err) msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, []byte("test")) @@ -141,8 +139,7 @@ func TestBlockSignatureHandler(t *testing.T) { snowCtx.ChainID, warpSigner, blockClient, - uptime.NoOpCalculator, - interfaces.NoOpState, + warptest.NoOpValidatorReader{}, snowCtx.Lock.RLocker(), database, messageSignatureCache, diff --git a/warp/verifier_backend.go b/warp/verifier_backend.go index a73a1d1ea9..bff214665c 100644 --- a/warp/verifier_backend.go +++ b/warp/verifier_backend.go @@ -104,10 +104,10 @@ func (b *backend) verifyOffchainAddressedCall(addressedCall *payload.AddressedCa } func (b *backend) verifyUptimeMessage(uptimeMsg *messages.ValidatorUptime) *common.AppError { - b.stateLock.Lock() - defer b.stateLock.Unlock() + b.validatorLock.Lock() + defer b.validatorLock.Unlock() // first get the validator's nodeID - vdr, err := b.validatorState.GetValidator(uptimeMsg.ValidationID) + vdr, err := b.validatorReader.GetValidator(uptimeMsg.ValidationID) if err != nil { return &common.AppError{ Code: VerifyErrCode, @@ -117,7 +117,7 @@ func (b *backend) verifyUptimeMessage(uptimeMsg *messages.ValidatorUptime) *comm nodeID := vdr.NodeID // then get the current uptime - currentUptime, _, err := b.uptimeCalculator.CalculateUptime(nodeID) + currentUptime, _, err := b.validatorReader.CalculateUptime(nodeID) if err != nil { return &common.AppError{ Code: VerifyErrCode, diff --git a/warp/verifier_backend_test.go b/warp/verifier_backend_test.go index d72ec94fa7..f5ac8351bc 100644 --- a/warp/verifier_backend_test.go +++ b/warp/verifier_backend_test.go @@ -14,13 +14,12 @@ import ( "github.com/ava-labs/avalanchego/network/p2p/acp118" "github.com/ava-labs/avalanchego/proto/pb/sdk" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/timer/mockable" avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/platformvm/warp/payload" "github.com/ava-labs/subnet-evm/plugin/evm/validators" - "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" + stateinterfaces "github.com/ava-labs/subnet-evm/plugin/evm/validators/state/interfaces" "github.com/ava-labs/subnet-evm/utils" "github.com/ava-labs/subnet-evm/warp/messages" "github.com/ava-labs/subnet-evm/warp/warptest" @@ -103,7 +102,7 @@ func TestAddressedCallSignatures(t *testing.T) { } else { sigCache = &cache.Empty[ids.ID, []byte]{} } - warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, uptime.NoOpCalculator, interfaces.NoOpState, snowCtx.Lock.RLocker(), database, sigCache, 
[][]byte{offchainMessage.Bytes()}) + warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, warptest.NoOpValidatorReader{}, snowCtx.Lock.RLocker(), database, sigCache, [][]byte{offchainMessage.Bytes()}) require.NoError(t, err) handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner) @@ -218,8 +217,7 @@ func TestBlockSignatures(t *testing.T) { snowCtx.ChainID, warpSigner, blockClient, - uptime.NoOpCalculator, - interfaces.NoOpState, + warptest.NoOpValidatorReader{}, snowCtx.Lock.RLocker(), database, sigCache, @@ -290,12 +288,12 @@ func TestUptimeSignatures(t *testing.T) { } else { sigCache = &cache.Empty[ids.ID, []byte]{} } - state, err := validators.NewState(memdb.New()) - require.NoError(t, err) + chainCtx := utils.TestSnowContext() clk := &mockable.Clock{} - uptimeManager := uptime.NewManager(state, clk) - uptimeManager.StartTracking([]ids.NodeID{}) - warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, uptimeManager, state, snowCtx.Lock.RLocker(), database, sigCache, nil) + validatorsManager, err := validators.NewManager(chainCtx, memdb.New(), clk) + require.NoError(t, err) + validatorsManager.StartTracking([]ids.NodeID{}) + warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, validatorsManager, snowCtx.Lock.RLocker(), database, sigCache, nil) require.NoError(t, err) handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner) @@ -315,7 +313,7 @@ func TestUptimeSignatures(t *testing.T) { // uptime is less than requested (not connected) validationID := ids.GenerateTestID() nodeID := ids.GenerateTestNodeID() - require.NoError(t, state.AddValidator(interfaces.Validator{ + require.NoError(t, validatorsManager.AddValidator(stateinterfaces.Validator{ ValidationID: validationID, NodeID: nodeID, Weight: 1, @@ -329,7 +327,7 @@ func TestUptimeSignatures(t *testing.T) { require.Contains(t, appErr.Error(), "current uptime 0 is less than queried uptime 80") // uptime is less than requested (not enough) - require.NoError(t, uptimeManager.Connect(nodeID)) + require.NoError(t, validatorsManager.Connect(nodeID)) clk.Set(clk.Time().Add(40 * time.Second)) protoBytes, _ = getUptimeMessageBytes([]byte{}, validationID, 80) _, appErr = handler.AppRequest(context.Background(), nodeID, time.Time{}, protoBytes) diff --git a/warp/warptest/noop_validator_reader.go b/warp/warptest/noop_validator_reader.go new file mode 100644 index 0000000000..01e6e31d7a --- /dev/null +++ b/warp/warptest/noop_validator_reader.go @@ -0,0 +1,42 @@ +// (c) 2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// warptest exposes common functionality for testing the warp package. 
+package warptest + +import ( + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" + stateinterfaces "github.com/ava-labs/subnet-evm/plugin/evm/validators/state/interfaces" +) + +var _ interfaces.ValidatorReader = &NoOpValidatorReader{} + +type NoOpValidatorReader struct{} + +func (NoOpValidatorReader) CalculateUptime(nodeID ids.NodeID) (time.Duration, time.Time, error) { + return 0, time.Time{}, nil +} + +func (NoOpValidatorReader) CalculateUptimePercent(nodeID ids.NodeID) (float64, error) { + return 0, nil +} + +func (NoOpValidatorReader) CalculateUptimePercentFrom(nodeID ids.NodeID, startTime time.Time) (float64, error) { + return 0, nil +} + +func (NoOpValidatorReader) GetNodeIDs() set.Set[ids.NodeID] { + return set.Set[ids.NodeID]{} +} + +func (NoOpValidatorReader) GetValidationIDs() set.Set[ids.ID] { + return set.Set[ids.ID]{} +} + +func (NoOpValidatorReader) GetValidator(vID ids.ID) (stateinterfaces.Validator, error) { + return stateinterfaces.Validator{}, nil +} From 377a22f48275e18c02035983b7d3bdb76abaf7f1 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 15 Nov 2024 01:52:26 +0300 Subject: [PATCH 94/98] Remove unused var --- plugin/evm/vm.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index f3c7c8d880..c1f36e6c82 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -132,8 +132,6 @@ const ( txGossipThrottlingPeriod = 10 * time.Second txGossipThrottlingLimit = 2 txGossipPollSize = 1 - - loadValidatorsFrequency = 1 * time.Minute ) // Define the API endpoints for the VM From 8390b5f572fd7ff97c817ffc93e9fc257aeeaccc Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 15 Nov 2024 01:57:49 +0300 Subject: [PATCH 95/98] regen mocks --- plugin/evm/validators/state/interfaces/mock_listener.go | 4 ++-- scripts/mocks.mockgen.txt | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plugin/evm/validators/state/interfaces/mock_listener.go b/plugin/evm/validators/state/interfaces/mock_listener.go index 8cf1903729..053a371d16 100644 --- a/plugin/evm/validators/state/interfaces/mock_listener.go +++ b/plugin/evm/validators/state/interfaces/mock_listener.go @@ -1,9 +1,9 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces (interfaces: StateCallbackListener) +// Source: github.com/ava-labs/subnet-evm/plugin/evm/validators/state/interfaces (interfaces: StateCallbackListener) // // Generated by this command: // -// mockgen -package=interfaces -destination=plugin/evm/validators/interfaces/mock_listener.go github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces StateCallbackListener +// mockgen -package=interfaces -destination=plugin/evm/validators/state/interfaces/mock_listener.go github.com/ava-labs/subnet-evm/plugin/evm/validators/state/interfaces StateCallbackListener // // Package interfaces is a generated GoMock package. 
diff --git a/scripts/mocks.mockgen.txt b/scripts/mocks.mockgen.txt index 43a9d60cad..aba87c80da 100644 --- a/scripts/mocks.mockgen.txt +++ b/scripts/mocks.mockgen.txt @@ -1,3 +1,3 @@ github.com/ava-labs/subnet-evm/precompile/precompileconfig=Predicater,Config,ChainConfig,Accepter=precompile/precompileconfig/mocks.go github.com/ava-labs/subnet-evm/precompile/contract=BlockContext,AccessibleState,StateDB=precompile/contract/mocks.go -github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces=StateCallbackListener=plugin/evm/validators/interfaces/mock_listener.go +github.com/ava-labs/subnet-evm/plugin/evm/validators/state/interfaces=StateCallbackListener=plugin/evm/validators/state/interfaces/mock_listener.go From 42532d1fdd79209f040e2d07586d844a909dec99 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 15 Nov 2024 15:46:54 +0300 Subject: [PATCH 96/98] add GetValidatorAndUptime to validator manager --- .../evm/validators/interfaces/interfaces.go | 11 +- plugin/evm/validators/manager.go | 27 +++ plugin/evm/vm.go | 1 - plugin/evm/vm_test.go | 142 --------------- plugin/evm/vm_validators_test.go | 161 ++++++++++++++++++ warp/backend.go | 4 - warp/backend_test.go | 11 +- warp/handlers/signature_request_test.go | 3 +- warp/verifier_backend.go | 19 +-- warp/verifier_backend_test.go | 5 +- warp/warptest/noop_validator_reader.go | 25 +-- 11 files changed, 210 insertions(+), 199 deletions(-) create mode 100644 plugin/evm/vm_validators_test.go diff --git a/plugin/evm/validators/interfaces/interfaces.go b/plugin/evm/validators/interfaces/interfaces.go index 338833135c..d3a1e3c384 100644 --- a/plugin/evm/validators/interfaces/interfaces.go +++ b/plugin/evm/validators/interfaces/interfaces.go @@ -5,19 +5,26 @@ package interfaces import ( "context" + "time" + "github.com/ava-labs/avalanchego/ids" avalancheuptime "github.com/ava-labs/avalanchego/snow/uptime" stateinterfaces "github.com/ava-labs/subnet-evm/plugin/evm/validators/state/interfaces" ) type ValidatorReader interface { - stateinterfaces.StateReader - avalancheuptime.Calculator + // GetValidatorUptime returns the uptime of the validator specified by validationID + GetValidatorAndUptime(validationID ids.ID) (stateinterfaces.Validator, time.Duration, time.Time, error) } type Manager interface { stateinterfaces.State avalancheuptime.Manager + ValidatorReader + + // Sync updates the validator set managed + // by the manager Sync(ctx context.Context) error + // DispatchSync starts the sync process DispatchSync(ctx context.Context) } diff --git a/plugin/evm/validators/manager.go b/plugin/evm/validators/manager.go index 7ff4383a90..51be7606ad 100644 --- a/plugin/evm/validators/manager.go +++ b/plugin/evm/validators/manager.go @@ -33,6 +33,8 @@ type manager struct { uptimeinterfaces.PausableManager } +// NewManager returns a new validator manager +// that manages the validator state and the uptime manager. func NewManager( ctx *snow.Context, db database.Database, @@ -54,6 +56,30 @@ func NewManager( }, nil } +// GetValidatorUptime returns the calculated uptime of the validator specified by validationID +// and the last updated time. +// GetValidatorUptime holds the chain context lock while performing the operation and can be called concurrently. 
+func (m *manager) GetValidatorAndUptime(validationID ids.ID) (stateinterfaces.Validator, time.Duration, time.Time, error) { + // lock the state + m.chainCtx.Lock.RLock() + defer m.chainCtx.Lock.RUnlock() + + // Get validator first + vdr, err := m.GetValidator(validationID) + if err != nil { + return stateinterfaces.Validator{}, 0, time.Time{}, fmt.Errorf("failed to get validator: %w", err) + } + + uptime, lastUpdated, err := m.CalculateUptime(vdr.NodeID) + if err != nil { + return stateinterfaces.Validator{}, 0, time.Time{}, fmt.Errorf("failed to get uptime: %w", err) + } + + return vdr, uptime, lastUpdated, nil +} + +// DispatchSync starts the sync process +// DispatchSync holds the chain context lock while performing the sync. func (m *manager) DispatchSync(ctx context.Context) { ticker := time.NewTicker(SyncFrequency) defer ticker.Stop() @@ -74,6 +100,7 @@ func (m *manager) DispatchSync(ctx context.Context) { // Sync synchronizes the validator state with the current validator set // and writes the state to the database. +// Sync is not safe to call concurrently and should be called with the chain context locked. func (m *manager) Sync(ctx context.Context) error { now := time.Now() log.Debug("performing validator sync") diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index c1f36e6c82..5b6298dd1c 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -529,7 +529,6 @@ func (vm *VM) Initialize( vm.ctx.WarpSigner, vm, vm.validatorsManager, - vm.ctx.Lock.RLocker(), vm.warpDB, meteredCache, offchainWarpMessages, diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index af77fcaf19..55fde2844f 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -34,7 +34,6 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/enginetest" - "github.com/ava-labs/avalanchego/snow/validators/validatorstest" "github.com/ava-labs/avalanchego/upgrade" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/logging" @@ -50,7 +49,6 @@ import ( "github.com/ava-labs/subnet-evm/eth" "github.com/ava-labs/subnet-evm/metrics" "github.com/ava-labs/subnet-evm/params" - "github.com/ava-labs/subnet-evm/plugin/evm/validators" "github.com/ava-labs/subnet-evm/precompile/allowlist" "github.com/ava-labs/subnet-evm/precompile/contracts/deployerallowlist" "github.com/ava-labs/subnet-evm/precompile/contracts/feemanager" @@ -61,7 +59,6 @@ import ( "github.com/ava-labs/subnet-evm/utils" "github.com/ava-labs/subnet-evm/vmerrs" - avagovalidators "github.com/ava-labs/avalanchego/snow/validators" avagoconstants "github.com/ava-labs/avalanchego/utils/constants" ) @@ -3176,142 +3173,3 @@ func TestStandaloneDB(t *testing.T) { assert.False(t, isDBEmpty(vm.db)) assert.False(t, isDBEmpty(vm.acceptedBlockDB)) } - -func TestValidatorState(t *testing.T) { - require := require.New(t) - genesis := &core.Genesis{} - require.NoError(genesis.UnmarshalJSON([]byte(genesisJSONLatest))) - genesisJSON, err := genesis.MarshalJSON() - require.NoError(err) - - vm := &VM{} - ctx, dbManager, genesisBytes, issuer, _ := setupGenesis(t, string(genesisJSON)) - appSender := &enginetest.Sender{T: 
t} - appSender.CantSendAppGossip = true - testNodeIDs := []ids.NodeID{ - ids.GenerateTestNodeID(), - ids.GenerateTestNodeID(), - ids.GenerateTestNodeID(), - } - testValidationIDs := []ids.ID{ - ids.GenerateTestID(), - ids.GenerateTestID(), - ids.GenerateTestID(), - } - ctx.ValidatorState = &validatorstest.State{ - GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagovalidators.GetCurrentValidatorOutput, uint64, error) { - return map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{ - testValidationIDs[0]: { - NodeID: testNodeIDs[0], - PublicKey: nil, - Weight: 1, - }, - testValidationIDs[1]: { - NodeID: testNodeIDs[1], - PublicKey: nil, - Weight: 1, - }, - testValidationIDs[2]: { - NodeID: testNodeIDs[2], - PublicKey: nil, - Weight: 1, - }, - }, 0, nil - }, - } - appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } - err = vm.Initialize( - context.Background(), - ctx, - dbManager, - genesisBytes, - []byte(""), - []byte(""), - issuer, - []*commonEng.Fx{}, - appSender, - ) - require.NoError(err, "error initializing GenesisVM") - - // Test case 1: state should not be populated until bootstrapped - require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - require.Equal(0, vm.validatorsManager.GetValidationIDs().Len()) - _, _, err = vm.validatorsManager.CalculateUptime(testNodeIDs[0]) - require.ErrorIs(database.ErrNotFound, err) - require.False(vm.validatorsManager.StartedTracking()) - - // Test case 2: state should be populated after bootstrapped - require.NoError(vm.SetState(context.Background(), snow.NormalOp)) - require.Len(vm.validatorsManager.GetValidationIDs(), 3) - _, _, err = vm.validatorsManager.CalculateUptime(testNodeIDs[0]) - require.NoError(err) - require.True(vm.validatorsManager.StartedTracking()) - - // Test case 3: restarting VM should not lose state - vm.Shutdown(context.Background()) - // Shutdown should stop tracking - require.False(vm.validatorsManager.StartedTracking()) - - vm = &VM{} - err = vm.Initialize( - context.Background(), - utils.TestSnowContext(), // this context does not have validators state, making VM to source it from the database - dbManager, - genesisBytes, - []byte(""), - []byte(""), - issuer, - []*commonEng.Fx{}, - appSender, - ) - require.NoError(err, "error initializing GenesisVM") - require.Len(vm.validatorsManager.GetValidationIDs(), 3) - _, _, err = vm.validatorsManager.CalculateUptime(testNodeIDs[0]) - require.NoError(err) - require.False(vm.validatorsManager.StartedTracking()) - - // Test case 4: new validators should be added to the state - newValidationID := ids.GenerateTestID() - newNodeID := ids.GenerateTestNodeID() - testState := &validatorstest.State{ - GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagovalidators.GetCurrentValidatorOutput, uint64, error) { - return map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{ - testValidationIDs[0]: { - NodeID: testNodeIDs[0], - PublicKey: nil, - Weight: 1, - }, - testValidationIDs[1]: { - NodeID: testNodeIDs[1], - PublicKey: nil, - Weight: 1, - }, - testValidationIDs[2]: { - NodeID: testNodeIDs[2], - PublicKey: nil, - Weight: 1, - }, - newValidationID: { - NodeID: newNodeID, - PublicKey: nil, - Weight: 1, - }, - }, 0, nil - }, - } - // set VM as bootstrapped - require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - require.NoError(vm.SetState(context.Background(), snow.NormalOp)) - - vm.ctx.ValidatorState = testState - - // new validator should be 
added to the state eventually after SyncFrequency - require.EventuallyWithT(func(c *assert.CollectT) { - vm.ctx.Lock.Lock() - defer vm.ctx.Lock.Unlock() - assert.Len(c, vm.validatorsManager.GetNodeIDs(), 4) - newValidator, err := vm.validatorsManager.GetValidator(newValidationID) - assert.NoError(c, err) - assert.Equal(c, newNodeID, newValidator.NodeID) - }, validators.SyncFrequency*2, 5*time.Second) -} diff --git a/plugin/evm/vm_validators_test.go b/plugin/evm/vm_validators_test.go new file mode 100644 index 0000000000..d8d0719fde --- /dev/null +++ b/plugin/evm/vm_validators_test.go @@ -0,0 +1,161 @@ +// See the file LICENSE for licensing terms. + +package evm + +import ( + "context" + "testing" + "time" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + commonEng "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/enginetest" + avagovalidators "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/snow/validators/validatorstest" + "github.com/ava-labs/subnet-evm/core" + "github.com/ava-labs/subnet-evm/plugin/evm/validators" + "github.com/ava-labs/subnet-evm/utils" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestValidatorState(t *testing.T) { + require := require.New(t) + genesis := &core.Genesis{} + require.NoError(genesis.UnmarshalJSON([]byte(genesisJSONLatest))) + genesisJSON, err := genesis.MarshalJSON() + require.NoError(err) + + vm := &VM{} + ctx, dbManager, genesisBytes, issuer, _ := setupGenesis(t, string(genesisJSON)) + appSender := &enginetest.Sender{T: t} + appSender.CantSendAppGossip = true + testNodeIDs := []ids.NodeID{ + ids.GenerateTestNodeID(), + ids.GenerateTestNodeID(), + ids.GenerateTestNodeID(), + } + testValidationIDs := []ids.ID{ + ids.GenerateTestID(), + ids.GenerateTestID(), + ids.GenerateTestID(), + } + ctx.ValidatorState = &validatorstest.State{ + GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagovalidators.GetCurrentValidatorOutput, uint64, error) { + return map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + PublicKey: nil, + Weight: 1, + }, + testValidationIDs[1]: { + NodeID: testNodeIDs[1], + PublicKey: nil, + Weight: 1, + }, + testValidationIDs[2]: { + NodeID: testNodeIDs[2], + PublicKey: nil, + Weight: 1, + }, + }, 0, nil + }, + } + appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil } + err = vm.Initialize( + context.Background(), + ctx, + dbManager, + genesisBytes, + []byte(""), + []byte(""), + issuer, + []*commonEng.Fx{}, + appSender, + ) + require.NoError(err, "error initializing GenesisVM") + + // Test case 1: state should not be populated until bootstrapped + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.Equal(0, vm.validatorsManager.GetValidationIDs().Len()) + _, _, err = vm.validatorsManager.CalculateUptime(testNodeIDs[0]) + require.ErrorIs(database.ErrNotFound, err) + require.False(vm.validatorsManager.StartedTracking()) + + // Test case 2: state should be populated after bootstrapped + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) + 
require.Len(vm.validatorsManager.GetValidationIDs(), 3) + _, _, err = vm.validatorsManager.CalculateUptime(testNodeIDs[0]) + require.NoError(err) + require.True(vm.validatorsManager.StartedTracking()) + + // Test case 3: restarting VM should not lose state + vm.Shutdown(context.Background()) + // Shutdown should stop tracking + require.False(vm.validatorsManager.StartedTracking()) + + vm = &VM{} + err = vm.Initialize( + context.Background(), + utils.TestSnowContext(), // this context does not have validators state, making VM to source it from the database + dbManager, + genesisBytes, + []byte(""), + []byte(""), + issuer, + []*commonEng.Fx{}, + appSender, + ) + require.NoError(err, "error initializing GenesisVM") + require.Len(vm.validatorsManager.GetValidationIDs(), 3) + _, _, err = vm.validatorsManager.CalculateUptime(testNodeIDs[0]) + require.NoError(err) + require.False(vm.validatorsManager.StartedTracking()) + + // Test case 4: new validators should be added to the state + newValidationID := ids.GenerateTestID() + newNodeID := ids.GenerateTestNodeID() + testState := &validatorstest.State{ + GetCurrentValidatorSetF: func(ctx context.Context, subnetID ids.ID) (map[ids.ID]*avagovalidators.GetCurrentValidatorOutput, uint64, error) { + return map[ids.ID]*avagovalidators.GetCurrentValidatorOutput{ + testValidationIDs[0]: { + NodeID: testNodeIDs[0], + PublicKey: nil, + Weight: 1, + }, + testValidationIDs[1]: { + NodeID: testNodeIDs[1], + PublicKey: nil, + Weight: 1, + }, + testValidationIDs[2]: { + NodeID: testNodeIDs[2], + PublicKey: nil, + Weight: 1, + }, + newValidationID: { + NodeID: newNodeID, + PublicKey: nil, + Weight: 1, + }, + }, 0, nil + }, + } + // set VM as bootstrapped + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) + + vm.ctx.ValidatorState = testState + + // new validator should be added to the state eventually after SyncFrequency + require.EventuallyWithT(func(c *assert.CollectT) { + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + assert.Len(c, vm.validatorsManager.GetNodeIDs(), 4) + newValidator, err := vm.validatorsManager.GetValidator(newValidationID) + assert.NoError(c, err) + assert.Equal(c, newNodeID, newValidator.NodeID) + }, validators.SyncFrequency*2, 5*time.Second) +} diff --git a/warp/backend.go b/warp/backend.go index ab0fbf4f57..84e62c7a57 100644 --- a/warp/backend.go +++ b/warp/backend.go @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "sync" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database" @@ -59,7 +58,6 @@ type backend struct { warpSigner avalancheWarp.Signer blockClient BlockClient validatorReader interfaces.ValidatorReader - validatorLock sync.Locker signatureCache cache.Cacher[ids.ID, []byte] messageCache *cache.LRU[ids.ID, *avalancheWarp.UnsignedMessage] offchainAddressedCallMsgs map[ids.ID]*avalancheWarp.UnsignedMessage @@ -73,7 +71,6 @@ func NewBackend( warpSigner avalancheWarp.Signer, blockClient BlockClient, validatorReader interfaces.ValidatorReader, - validatorLock sync.Locker, db database.Database, signatureCache cache.Cacher[ids.ID, []byte], offchainMessages [][]byte, @@ -86,7 +83,6 @@ func NewBackend( blockClient: blockClient, signatureCache: signatureCache, validatorReader: validatorReader, - validatorLock: validatorLock, messageCache: &cache.LRU[ids.ID, *avalancheWarp.UnsignedMessage]{Size: messageCacheSize}, stats: newVerifierStats(), offchainAddressedCallMsgs: 
make(map[ids.ID]*avalancheWarp.UnsignedMessage), diff --git a/warp/backend_test.go b/warp/backend_test.go index a29f9a4887..cb98a2351c 100644 --- a/warp/backend_test.go +++ b/warp/backend_test.go @@ -5,7 +5,6 @@ package warp import ( "context" - "sync" "testing" "github.com/ava-labs/avalanchego/cache" @@ -46,7 +45,7 @@ func TestAddAndGetValidMessage(t *testing.T) { require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, warptest.NoOpValidatorReader{}, &sync.RWMutex{}, db, messageSignatureCache, nil) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, warptest.NoOpValidatorReader{}, db, messageSignatureCache, nil) require.NoError(t, err) // Add testUnsignedMessage to the warp backend @@ -69,7 +68,7 @@ func TestAddAndGetUnknownMessage(t *testing.T) { require.NoError(t, err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, warptest.NoOpValidatorReader{}, &sync.RWMutex{}, db, messageSignatureCache, nil) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, warptest.NoOpValidatorReader{}, db, messageSignatureCache, nil) require.NoError(t, err) // Try getting a signature for a message that was not added. @@ -88,7 +87,7 @@ func TestGetBlockSignature(t *testing.T) { require.NoError(err) warpSigner := avalancheWarp.NewSigner(sk, networkID, sourceChainID) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 500} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, blockClient, warptest.NoOpValidatorReader{}, &sync.RWMutex{}, db, messageSignatureCache, nil) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, blockClient, warptest.NoOpValidatorReader{}, db, messageSignatureCache, nil) require.NoError(err) blockHashPayload, err := payload.NewHash(blkID) @@ -115,7 +114,7 @@ func TestZeroSizedCache(t *testing.T) { // Verify zero sized cache works normally, because the lru cache will be initialized to size 1 for any size parameter <= 0. 
messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 0} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, warptest.NoOpValidatorReader{}, &sync.RWMutex{}, db, messageSignatureCache, nil) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, warptest.NoOpValidatorReader{}, db, messageSignatureCache, nil) require.NoError(t, err) // Add testUnsignedMessage to the warp backend @@ -175,7 +174,7 @@ func TestOffChainMessages(t *testing.T) { db := memdb.New() messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 0} - backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, warptest.NoOpValidatorReader{}, &sync.RWMutex{}, db, messageSignatureCache, test.offchainMessages) + backend, err := NewBackend(networkID, sourceChainID, warpSigner, nil, warptest.NoOpValidatorReader{}, db, messageSignatureCache, test.offchainMessages) require.ErrorIs(err, test.err) if test.check != nil { test.check(require, backend) diff --git a/warp/handlers/signature_request_test.go b/warp/handlers/signature_request_test.go index b74b9332c3..aa265d9b33 100644 --- a/warp/handlers/signature_request_test.go +++ b/warp/handlers/signature_request_test.go @@ -33,7 +33,7 @@ func TestMessageSignatureHandler(t *testing.T) { require.NoError(t, err) messageSignatureCache := &cache.LRU[ids.ID, []byte]{Size: 100} - backend, err := warp.NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, warptest.NoOpValidatorReader{}, snowCtx.Lock.RLocker(), database, messageSignatureCache, [][]byte{offchainMessage.Bytes()}) + backend, err := warp.NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, warptest.NoOpValidatorReader{}, database, messageSignatureCache, [][]byte{offchainMessage.Bytes()}) require.NoError(t, err) msg, err := avalancheWarp.NewUnsignedMessage(snowCtx.NetworkID, snowCtx.ChainID, []byte("test")) @@ -140,7 +140,6 @@ func TestBlockSignatureHandler(t *testing.T) { warpSigner, blockClient, warptest.NoOpValidatorReader{}, - snowCtx.Lock.RLocker(), database, messageSignatureCache, nil, diff --git a/warp/verifier_backend.go b/warp/verifier_backend.go index 23c3dfdabe..255851d601 100644 --- a/warp/verifier_backend.go +++ b/warp/verifier_backend.go @@ -110,24 +110,11 @@ func (b *backend) verifyOffchainAddressedCall(addressedCall *payload.AddressedCa } func (b *backend) verifyUptimeMessage(uptimeMsg *messages.ValidatorUptime) *common.AppError { - b.validatorLock.Lock() - defer b.validatorLock.Unlock() - // first get the validator's nodeID - vdr, err := b.validatorReader.GetValidator(uptimeMsg.ValidationID) + vdr, currentUptime, _, err := b.validatorReader.GetValidatorAndUptime(uptimeMsg.ValidationID) if err != nil { return &common.AppError{ Code: VerifyErrCode, - Message: fmt.Sprintf("failed to get validator for validationID %s: %s", uptimeMsg.ValidationID, err.Error()), - } - } - nodeID := vdr.NodeID - - // then get the current uptime - currentUptime, _, err := b.validatorReader.CalculateUptime(nodeID) - if err != nil { - return &common.AppError{ - Code: VerifyErrCode, - Message: fmt.Sprintf("failed to calculate uptime for nodeID %s: %s", nodeID, err.Error()), + Message: fmt.Sprintf("failed to get uptime for validationID %s: %s", uptimeMsg.ValidationID, err.Error()), } } @@ -136,7 +123,7 @@ func (b *backend) verifyUptimeMessage(uptimeMsg *messages.ValidatorUptime) *comm if currentUptimeSeconds < uptimeMsg.TotalUptime { return &common.AppError{ Code: VerifyErrCode, - Message: fmt.Sprintf("current uptime %d is less 
than queried uptime %d for nodeID %s", currentUptimeSeconds, uptimeMsg.TotalUptime, nodeID), + Message: fmt.Sprintf("current uptime %d is less than queried uptime %d for nodeID %s", currentUptimeSeconds, uptimeMsg.TotalUptime, vdr.NodeID), } } diff --git a/warp/verifier_backend_test.go b/warp/verifier_backend_test.go index f5ac8351bc..659c8b94b5 100644 --- a/warp/verifier_backend_test.go +++ b/warp/verifier_backend_test.go @@ -102,7 +102,7 @@ func TestAddressedCallSignatures(t *testing.T) { } else { sigCache = &cache.Empty[ids.ID, []byte]{} } - warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, warptest.NoOpValidatorReader{}, snowCtx.Lock.RLocker(), database, sigCache, [][]byte{offchainMessage.Bytes()}) + warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, warptest.NoOpValidatorReader{}, database, sigCache, [][]byte{offchainMessage.Bytes()}) require.NoError(t, err) handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner) @@ -218,7 +218,6 @@ func TestBlockSignatures(t *testing.T) { warpSigner, blockClient, warptest.NoOpValidatorReader{}, - snowCtx.Lock.RLocker(), database, sigCache, nil, @@ -293,7 +292,7 @@ func TestUptimeSignatures(t *testing.T) { validatorsManager, err := validators.NewManager(chainCtx, memdb.New(), clk) require.NoError(t, err) validatorsManager.StartTracking([]ids.NodeID{}) - warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, validatorsManager, snowCtx.Lock.RLocker(), database, sigCache, nil) + warpBackend, err := NewBackend(snowCtx.NetworkID, snowCtx.ChainID, warpSigner, warptest.EmptyBlockClient, validatorsManager, database, sigCache, nil) require.NoError(t, err) handler := acp118.NewCachedHandler(sigCache, warpBackend, warpSigner) diff --git a/warp/warptest/noop_validator_reader.go b/warp/warptest/noop_validator_reader.go index 01e6e31d7a..03171f84f5 100644 --- a/warp/warptest/noop_validator_reader.go +++ b/warp/warptest/noop_validator_reader.go @@ -8,7 +8,6 @@ import ( "time" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces" stateinterfaces "github.com/ava-labs/subnet-evm/plugin/evm/validators/state/interfaces" ) @@ -17,26 +16,6 @@ var _ interfaces.ValidatorReader = &NoOpValidatorReader{} type NoOpValidatorReader struct{} -func (NoOpValidatorReader) CalculateUptime(nodeID ids.NodeID) (time.Duration, time.Time, error) { - return 0, time.Time{}, nil -} - -func (NoOpValidatorReader) CalculateUptimePercent(nodeID ids.NodeID) (float64, error) { - return 0, nil -} - -func (NoOpValidatorReader) CalculateUptimePercentFrom(nodeID ids.NodeID, startTime time.Time) (float64, error) { - return 0, nil -} - -func (NoOpValidatorReader) GetNodeIDs() set.Set[ids.NodeID] { - return set.Set[ids.NodeID]{} -} - -func (NoOpValidatorReader) GetValidationIDs() set.Set[ids.ID] { - return set.Set[ids.ID]{} -} - -func (NoOpValidatorReader) GetValidator(vID ids.ID) (stateinterfaces.Validator, error) { - return stateinterfaces.Validator{}, nil +func (NoOpValidatorReader) GetValidatorAndUptime(ids.ID) (stateinterfaces.Validator, time.Duration, time.Time, error) { + return stateinterfaces.Validator{}, 0, time.Time{}, nil } From c8818840b2adbdf67abae57b7d2b861e48fed7b9 Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 15 Nov 2024 21:08:34 +0300 Subject: 
[PATCH 97/98] Update plugin/evm/validators/interfaces/interfaces.go Co-authored-by: Darioush Jalali Signed-off-by: Ceyhun Onur --- plugin/evm/validators/interfaces/interfaces.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/evm/validators/interfaces/interfaces.go b/plugin/evm/validators/interfaces/interfaces.go index d3a1e3c384..d8b88b3626 100644 --- a/plugin/evm/validators/interfaces/interfaces.go +++ b/plugin/evm/validators/interfaces/interfaces.go @@ -13,7 +13,7 @@ import ( ) type ValidatorReader interface { - // GetValidatorUptime returns the uptime of the validator specified by validationID + // GetValidatorAndUptime returns the uptime of the validator specified by validationID GetValidatorAndUptime(validationID ids.ID) (stateinterfaces.Validator, time.Duration, time.Time, error) } From c91b3a764887989a6fc507e93a2c0e49590b724a Mon Sep 17 00:00:00 2001 From: Ceyhun Onur Date: Fri, 15 Nov 2024 21:08:43 +0300 Subject: [PATCH 98/98] Update plugin/evm/validators/manager.go Co-authored-by: Darioush Jalali Signed-off-by: Ceyhun Onur --- plugin/evm/validators/manager.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/evm/validators/manager.go b/plugin/evm/validators/manager.go index 51be7606ad..e679be220c 100644 --- a/plugin/evm/validators/manager.go +++ b/plugin/evm/validators/manager.go @@ -56,9 +56,9 @@ func NewManager( }, nil } -// GetValidatorUptime returns the calculated uptime of the validator specified by validationID +// GetValidatorAndUptime returns the calculated uptime of the validator specified by validationID // and the last updated time. -// GetValidatorUptime holds the chain context lock while performing the operation and can be called concurrently. +// GetValidatorAndUptime holds the chain context lock while performing the operation and can be called concurrently. func (m *manager) GetValidatorAndUptime(validationID ids.ID) (stateinterfaces.Validator, time.Duration, time.Time, error) { // lock the state m.chainCtx.Lock.RLock()
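
The net effect of patches 96-98: interfaces.ValidatorReader shrinks to a single GetValidatorAndUptime method, the validator manager implements it under the chain context read lock, and warp.NewBackend loses its sync.Locker parameter. A minimal sketch of the new call shape follows; it is illustrative only: fixedUptimeReader, buildBackend, and the warpexample package name are hypothetical, while the NewBackend argument order and the import aliases (avalancheWarp, stateinterfaces) follow the updated tests above.

package warpexample // hypothetical package, for illustration only

import (
	"time"

	"github.com/ava-labs/avalanchego/cache"
	"github.com/ava-labs/avalanchego/database/memdb"
	"github.com/ava-labs/avalanchego/ids"
	avalancheWarp "github.com/ava-labs/avalanchego/vms/platformvm/warp"

	"github.com/ava-labs/subnet-evm/plugin/evm/validators/interfaces"
	stateinterfaces "github.com/ava-labs/subnet-evm/plugin/evm/validators/state/interfaces"
	"github.com/ava-labs/subnet-evm/warp"
	"github.com/ava-labs/subnet-evm/warp/warptest"
)

// fixedUptimeReader is a hypothetical test double: with the reduced interface,
// an implementer only needs to provide GetValidatorAndUptime.
type fixedUptimeReader struct {
	nodeID ids.NodeID
	uptime time.Duration
}

var _ interfaces.ValidatorReader = fixedUptimeReader{}

func (r fixedUptimeReader) GetValidatorAndUptime(ids.ID) (stateinterfaces.Validator, time.Duration, time.Time, error) {
	// Report a fixed uptime for every validation ID.
	return stateinterfaces.Validator{NodeID: r.nodeID}, r.uptime, time.Now(), nil
}

// buildBackend sketches the updated NewBackend call: the sync.Locker argument
// is gone and the reader is passed directly.
func buildBackend(networkID uint32, chainID ids.ID, signer avalancheWarp.Signer) error {
	reader := fixedUptimeReader{nodeID: ids.GenerateTestNodeID(), uptime: time.Hour}
	backend, err := warp.NewBackend(
		networkID,
		chainID,
		signer,
		warptest.EmptyBlockClient, // no block client needed for this sketch
		reader,
		memdb.New(),
		&cache.LRU[ids.ID, []byte]{Size: 500},
		nil, // no off-chain messages
	)
	if err != nil {
		return err
	}
	_ = backend // e.g. wrap with acp118.NewCachedHandler in a real test
	return nil
}

Dropping the locker from NewBackend works because, per the manager's doc comment, GetValidatorAndUptime now takes the chain context read lock itself, so callers such as verifyUptimeMessage no longer need to coordinate locking around separate GetValidator and CalculateUptime calls.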