diff --git a/cmd/gossamer/README.md b/cmd/gossamer/README.md index e27e878603..272fb7c7da 100644 --- a/cmd/gossamer/README.md +++ b/cmd/gossamer/README.md @@ -14,6 +14,7 @@ cd gossamer ### Compile To put the binary in ./bin, run: + ```bash make build ``` @@ -62,7 +63,7 @@ The node configuration can be modified in the `config.toml` file. ### Start the node ```bash -gossamer --basepath /tmp/gossamer --key alice +gossamer --basepath /tmp/gossamer --key alice ``` **Note: The `init` command is optional. If the node is not initialised, it will be initialised with the default configuration.** @@ -108,12 +109,14 @@ This subcommand provides capabilities that are similar to [Parity's Subkey utility](https://docs.substrate.io/v3/tools/subkey). The account command supports following arguments: + - `generate` - generates a new key pair; specify `--scheme ed25519`, `--scheme secp256k1`, or `--scheme sr25519` (default) - `list` - lists the keys in the Gossamer keystore - `import` - imports a key from a keystore file - `import-raw` - imports a raw key from a keystore file Supported flags: + - `keystore-path` - path to the Gossamer keystore - `keystore-file` - path to the keystore file - `chain` - path to the human-readable chain-spec file @@ -121,6 +124,7 @@ Supported flags: - `--password` - allows the user to provide a password to either encrypt a generated key or unlock the Gossamer keystore Examples: + - `gossamer account generate --scheme ed25519` - generates an `ed25519` key pair - `gossamer account list` - lists the keys in the Gossamer keystore - `gossamer account import --keystore-file keystore.json` - imports a key from a keystore file @@ -145,6 +149,7 @@ represent the Gossamer default configuration. 
- `--output-path` - path to the file where the compiled chain-spec should be written Examples: + - `gossamer build-spec --chain chain-spec.json --output-path compiled-chain-spec.json` - compiles a human-readable chain-spec into a format that Gossamer can consume - `gossamer build-spec --chain chain-spec.json --raw --output-path compiled-chain-spec.json` - compiles a human-readable @@ -166,6 +171,7 @@ of a JSON file. The input for this subcommand can be retrieved from - `--chain` - path to the human-readable chain-spec file Examples: + - `gossamer import-state --first-slot 1 --header header.json --state state.json --chain chain-spec.json` - seeds Gossamer storage with key-value pairs from a JSON file @@ -185,6 +191,7 @@ What follows is a list that describes the services and capabilities that inform #### State +<!-- TODO: update the `State` description to pebble db --> This service is a wrapper around an instance of [`chaindb`](https://github.com/ChainSafe/chaindb), a key-value database that is built on top of [BadgerDB](https://github.com/dgraph-io/badger) from [Dgraph](https://dgraph.io/). The state service provides storage capabilities for the other Gossamer services - each service is assigned a prefix that is added @@ -271,4 +278,4 @@ capabilities are defined in the [dot/telemetry](../../dot/telemetry) package and The default listening address for Prometheus metrics is `localhost:9876`, and Gossamer allows the user to configure this parameter with the `--metrics-address` command-line parameter. The Gossamer telemetry server publishes telemetry data that is compatible with [Polkadot Telemetry](https://github.com/paritytech/substrate-telemetry) and -[its helpful UI](https://telemetry.polkadot.io/). \ No newline at end of file +[its helpful UI](https://telemetry.polkadot.io/). 
diff --git a/dot/core/service.go b/dot/core/service.go index 4d114072c5..d528e8aa22 100644 --- a/dot/core/service.go +++ b/dot/core/service.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "sync" + "time" "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/dot/types" @@ -100,6 +101,26 @@ func NewService(cfg *Config) (*Service, error) { // Start starts the core service func (s *Service) Start() error { go s.handleBlocksAsync() + + go func() { + time.Sleep(30 * time.Second) + header, err := s.blockState.BestBlockHeader() + if err != nil { + panic(err) + } + + ba := &network.BlockAnnounceMessage{ + ParentHash: header.ParentHash, + Number: header.Number, + StateRoot: header.StateRoot, + ExtrinsicsRoot: header.ExtrinsicsRoot, + Digest: header.Digest, + BestBlock: true, + } + + s.net.GossipMessage(ba) + }() + return nil } diff --git a/dot/network/discovery.go b/dot/network/discovery.go index e8cb8212c1..ca9d47adb2 100644 --- a/dot/network/discovery.go +++ b/dot/network/discovery.go @@ -28,7 +28,7 @@ var ( startDHTTimeout = time.Second * 10 initialAdvertisementTimeout = time.Millisecond tryAdvertiseTimeout = time.Second * 30 - connectToPeersTimeout = time.Minute * 5 + connectToPeersTimeout = time.Minute findPeersTimeout = time.Minute ) @@ -183,7 +183,7 @@ func (d *discovery) checkPeerCount() { case <-d.ctx.Done(): return case <-ticker.C: - if len(d.h.Network().Peers()) > d.minPeers { + if len(d.h.Network().Peers()) >= d.maxPeers { continue } diff --git a/dot/network/errors.go b/dot/network/errors.go index ef895a6735..640f15c4ba 100644 --- a/dot/network/errors.go +++ b/dot/network/errors.go @@ -8,6 +8,8 @@ import ( ) var ( + ErrReceivedEmptyMessage = errors.New("received empty message") + errCannotValidateHandshake = errors.New("failed to validate handshake") errMessageTypeNotValid = errors.New("message type is not valid") errInvalidHandshakeForPeer = errors.New("peer previously sent invalid handshake") diff --git 
a/dot/network/message.go b/dot/network/message.go index 144abe78cf..086d489d7a 100644 --- a/dot/network/message.go +++ b/dot/network/message.go @@ -17,6 +17,9 @@ import ( "github.com/ChainSafe/gossamer/pkg/scale" ) +// MaxBlocksInResponse is maximum number of block data a BlockResponse message can contain +const MaxBlocksInResponse = 128 + type MessageType byte // Message types for notifications protocol messages. Used internally to map message to protocol. @@ -44,6 +47,9 @@ const ( RequestedDataReceipt = byte(4) RequestedDataMessageQueue = byte(8) RequestedDataJustification = byte(16) + BootstrapRequestData = RequestedDataHeader + + RequestedDataBody + + RequestedDataJustification ) var _ Message = (*BlockRequestMessage)(nil) @@ -354,3 +360,56 @@ func (cm *ConsensusMessage) Hash() (common.Hash, error) { } return common.Blake2bHash(encMsg) } + +func NewBlockRequest(startingBlock variadic.Uint32OrHash, amount uint32, + requestedData byte, direction SyncDirection) *BlockRequestMessage { + return &BlockRequestMessage{ + RequestedData: requestedData, + StartingBlock: startingBlock, + Direction: direction, + Max: &amount, + } +} + +func NewAscendingBlockRequests(startNumber, targetNumber uint, requestedData byte) []*BlockRequestMessage { + if startNumber > targetNumber { + return []*BlockRequestMessage{} + } + + diff := targetNumber - startNumber + + // start and end block are the same, just request 1 block + if diff == 0 { + return []*BlockRequestMessage{ + NewBlockRequest(*variadic.MustNewUint32OrHash(uint32(startNumber)), 1, requestedData, Ascending), + } + } + + numRequests := diff / MaxBlocksInResponse + // we should check if the diff is in the maxResponseSize bounds + // otherwise we should increase the numRequests by one, take this + // example, we want to sync from 0 to 259, the diff is 259 + // then the num of requests is 2 (uint(259)/uint(128)) however two requests will + // retrieve only 256 blocks (each request can retrieve a max of 128 
blocks), so we should + // create one more request to retrieve those missing blocks, 3 in this example. + missingBlocks := diff % MaxBlocksInResponse + if missingBlocks != 0 { + numRequests++ + } + + reqs := make([]*BlockRequestMessage, numRequests) + for i := uint(0); i < numRequests; i++ { + max := uint32(MaxBlocksInResponse) + + lastIteration := numRequests - 1 + if i == lastIteration && missingBlocks != 0 { + max = uint32(missingBlocks) + } + + start := variadic.MustNewUint32OrHash(startNumber) + reqs[i] = NewBlockRequest(*start, max, requestedData, Ascending) + startNumber += uint(max) + } + + return reqs +} diff --git a/dot/network/message_test.go b/dot/network/message_test.go index 44aed673a9..839cf0e228 100644 --- a/dot/network/message_test.go +++ b/dot/network/message_test.go @@ -422,3 +422,122 @@ func TestDecodeConsensusMessage(t *testing.T) { require.NoError(t, err) require.Equal(t, encMsg, encodedMessage) } + +func TestAscendingBlockRequest(t *testing.T) { + one := uint32(1) + three := uint32(3) + maxResponseSize := uint32(MaxBlocksInResponse) + cases := map[string]struct { + startNumber, targetNumber uint + expectedBlockRequestMessage []*BlockRequestMessage + }{ + "start_greater_than_target": { + startNumber: 10, + targetNumber: 0, + expectedBlockRequestMessage: []*BlockRequestMessage{}, + }, + + "no_difference_between_start_and_target": { + startNumber: 10, + targetNumber: 10, + expectedBlockRequestMessage: []*BlockRequestMessage{ + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(10)), + Direction: Ascending, + Max: &one, + }, + }, + }, + + "requesting_128_blocks": { + startNumber: 0, + targetNumber: 128, + expectedBlockRequestMessage: []*BlockRequestMessage{ + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(0)), + Direction: Ascending, + Max: &maxResponseSize, + }, + }, + }, + + "requesting_4_chunks_of_128_blocks": { + startNumber: 0, + targetNumber: 512, // 
128 * 4 + expectedBlockRequestMessage: []*BlockRequestMessage{ + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(0)), + Direction: Ascending, + Max: &maxResponseSize, + }, + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(128)), + Direction: Ascending, + Max: &maxResponseSize, + }, + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(256)), + Direction: Ascending, + Max: &maxResponseSize, + }, + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(384)), + Direction: Ascending, + Max: &maxResponseSize, + }, + }, + }, + + "requesting_4_chunks_of_128_plus_3_blocks": { + startNumber: 0, + targetNumber: (128 * 4) + 3, + expectedBlockRequestMessage: []*BlockRequestMessage{ + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(0)), + Direction: Ascending, + Max: &maxResponseSize, + }, + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(128)), + Direction: Ascending, + Max: &maxResponseSize, + }, + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(256)), + Direction: Ascending, + Max: &maxResponseSize, + }, + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(384)), + Direction: Ascending, + Max: &maxResponseSize, + }, + { + RequestedData: BootstrapRequestData, + StartingBlock: *variadic.MustNewUint32OrHash(uint32(512)), + Direction: Ascending, + Max: &three, + }, + }, + }, + } + + for tname, tt := range cases { + tt := tt + + t.Run(tname, func(t *testing.T) { + requests := NewAscendingBlockRequests(tt.startNumber, tt.targetNumber, BootstrapRequestData) + require.Equal(t, tt.expectedBlockRequestMessage, requests) + }) + } +} diff --git a/dot/network/service.go b/dot/network/service.go index e2da8d43d3..4cce2e486e 100644 
--- a/dot/network/service.go +++ b/dot/network/service.go @@ -605,6 +605,11 @@ func (s *Service) NetworkState() common.NetworkState { } } +// AllConnectedPeersID returns the IDs of all peers connected to the node instance +func (s *Service) AllConnectedPeersID() []peer.ID { + return s.host.p2pHost.Network().Peers() +} + // Peers returns information about connected peers needed for the rpc server func (s *Service) Peers() []common.PeerInfo { var peers []common.PeerInfo diff --git a/dot/network/sync.go b/dot/network/sync.go index efef281203..ce96ae9d70 100644 --- a/dot/network/sync.go +++ b/dot/network/sync.go @@ -4,16 +4,10 @@ package network import ( - "time" - libp2pnetwork "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" ) -var ( - BlockRequestTimeout = time.Second * 20 -) - // handleSyncStream handles streams with the /sync/2 protocol ID func (s *Service) handleSyncStream(stream libp2pnetwork.Stream) { if stream == nil { diff --git a/dot/node_integration_test.go b/dot/node_integration_test.go index 283437a2af..d49e0c1421 100644 --- a/dot/node_integration_test.go +++ b/dot/node_integration_test.go @@ -57,7 +57,6 @@ func TestNewNode(t *testing.T) { initConfig.Account.Key = "alice" initConfig.Core.Role = common.FullNodeRole initConfig.Core.WasmInterpreter = wazero_runtime.Name - initConfig.Log.Digest = "critical" networkConfig := &network.Config{ @@ -86,7 +85,7 @@ func TestNewNode(t *testing.T) { assert.NoError(t, err) mockServiceRegistry := NewMockServiceRegisterer(ctrl) - mockServiceRegistry.EXPECT().RegisterService(gomock.Any()).Times(8) + mockServiceRegistry.EXPECT().RegisterService(gomock.Any()).Times(9) m := NewMocknodeBuilderIface(ctrl) m.EXPECT().isNodeInitialised(initConfig.BasePath).Return(nil) @@ -325,6 +324,7 @@ func TestStartStopNode(t *testing.T) { config.ChainSpec = genFile config.Core.GrandpaAuthority = false config.Core.BabeAuthority = false + config.Network.MinPeers = 0 err := InitNode(config) 
require.NoError(t, err) diff --git a/dot/rpc/modules/chain_integration_test.go b/dot/rpc/modules/chain_integration_test.go index 7b290b3aec..259149c696 100644 --- a/dot/rpc/modules/chain_integration_test.go +++ b/dot/rpc/modules/chain_integration_test.go @@ -18,9 +18,9 @@ import ( "github.com/ChainSafe/gossamer/lib/trie" "github.com/ChainSafe/gossamer/lib/utils" "github.com/ChainSafe/gossamer/pkg/scale" + "github.com/cockroachdb/pebble" "github.com/golang/mock/gomock" - database "github.com/ChainSafe/chaindb" rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" "github.com/stretchr/testify/require" ) @@ -113,7 +113,7 @@ func TestChainGetHeader_NotFound(t *testing.T) { req := &ChainHashRequest{Bhash: &bhash} err = svc.GetHeader(nil, req, res) - require.EqualError(t, err, database.ErrKeyNotFound.Error()) + require.EqualError(t, err, pebble.ErrNotFound.Error()) } func TestChainGetBlock_Genesis(t *testing.T) { @@ -212,7 +212,7 @@ func TestChainGetBlock_NoFound(t *testing.T) { req := &ChainHashRequest{Bhash: &bhash} err = svc.GetBlock(nil, req, res) - require.EqualError(t, err, database.ErrKeyNotFound.Error()) + require.EqualError(t, err, pebble.ErrNotFound.Error()) } func TestChainGetBlockHash_Latest(t *testing.T) { diff --git a/dot/rpc/modules/childstate_integration_test.go b/dot/rpc/modules/childstate_integration_test.go index a46e8245b5..e47aec962c 100644 --- a/dot/rpc/modules/childstate_integration_test.go +++ b/dot/rpc/modules/childstate_integration_test.go @@ -10,10 +10,10 @@ import ( "fmt" "testing" - "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/trie" + "github.com/cockroachdb/pebble" 
"github.com/stretchr/testify/require" ) @@ -97,7 +97,7 @@ func TestChildStateGetStorageSize(t *testing.T) { keyChild: []byte(":not_exist"), }, { - err: chaindb.ErrKeyNotFound, + err: pebble.ErrNotFound, hash: &invalidHash, }, } @@ -155,7 +155,7 @@ func TestGetStorageHash(t *testing.T) { keyChild: []byte(":not_exist"), }, { - err: chaindb.ErrKeyNotFound, + err: pebble.ErrNotFound, hash: &invalidBlockHash, }, } diff --git a/dot/services.go b/dot/services.go index 11682556ff..6701757041 100644 --- a/dot/services.go +++ b/dot/services.go @@ -7,10 +7,10 @@ import ( "errors" "fmt" "strings" + "time" cfg "github.com/ChainSafe/gossamer/config" - "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/core" "github.com/ChainSafe/gossamer/dot/digest" "github.com/ChainSafe/gossamer/dot/network" @@ -20,6 +20,7 @@ import ( "github.com/ChainSafe/gossamer/dot/sync" "github.com/ChainSafe/gossamer/dot/system" "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/internal/metrics" "github.com/ChainSafe/gossamer/internal/pprof" @@ -56,7 +57,7 @@ type rpcServiceSettings struct { syncer *sync.Service } -func newInMemoryDB() (*chaindb.BadgerDB, error) { +func newInMemoryDB() (database.Database, error) { return utils.SetupDatabase("", true) } @@ -110,7 +111,7 @@ func (nodeBuilder) createRuntimeStorage(st *state.Service) (*runtime.NodeStorage return &runtime.NodeStorage{ LocalStorage: localStorage, - PersistentStorage: chaindb.NewTable(st.DB(), "offlinestorage"), + PersistentStorage: database.NewTable(st.DB(), "offlinestorage"), BaseDB: st.Base, }, nil } @@ -493,6 +494,13 @@ func (nodeBuilder) newSyncService(config *cfg.Config, st *state.Service, 
fg Bloc if err != nil { return nil, fmt.Errorf("failed to parse sync log level: %w", err) } + + const blockRequestTimeout = time.Second * 20 + requestMaker := net.GetRequestResponseProtocol( + network.SyncID, + blockRequestTimeout, + network.MaxBlockResponseSize) + syncCfg := &sync.Config{ LogLvl: syncLogLevel, Network: net, @@ -507,12 +515,10 @@ func (nodeBuilder) newSyncService(config *cfg.Config, st *state.Service, fg Bloc SlotDuration: slotDuration, Telemetry: telemetryMailer, BadBlocks: genesisData.BadBlocks, + RequestMaker: requestMaker, } - blockReqRes := net.GetRequestResponseProtocol(network.SyncID, network.BlockRequestTimeout, - network.MaxBlockResponseSize) - - return sync.NewService(syncCfg, blockReqRes) + return sync.NewService(syncCfg) } func (nodeBuilder) createDigestHandler(st *state.Service) (*digest.Handler, error) { diff --git a/dot/state/block.go b/dot/state/block.go index be1729b29d..1ed2d1929d 100644 --- a/dot/state/block.go +++ b/dot/state/block.go @@ -11,12 +11,13 @@ import ( "sync" "time" - "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/lib/blocktree" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/runtime" "github.com/ChainSafe/gossamer/pkg/scale" + "github.com/cockroachdb/pebble" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "golang.org/x/exp/slices" @@ -73,11 +74,11 @@ type BlockState struct { } // NewBlockState will create a new BlockState backed by the database located at basePath -func NewBlockState(db *chaindb.BadgerDB, trs *Tries, telemetry Telemetry) (*BlockState, error) { +func NewBlockState(db database.Database, trs *Tries, telemetry Telemetry) (*BlockState, error) { 
bs := &BlockState{ dbPath: db.Path(), baseState: NewBaseState(db), - db: chaindb.NewTable(db, blockPrefix), + db: database.NewTable(db, blockPrefix), unfinalisedBlocks: newHashToBlockMap(), tries: trs, imported: make(map[chan *types.Block]struct{}), @@ -105,12 +106,12 @@ func NewBlockState(db *chaindb.BadgerDB, trs *Tries, telemetry Telemetry) (*Bloc // NewBlockStateFromGenesis initialises a BlockState from a genesis header, // saving it to the database located at basePath -func NewBlockStateFromGenesis(db *chaindb.BadgerDB, trs *Tries, header *types.Header, +func NewBlockStateFromGenesis(db database.Database, trs *Tries, header *types.Header, telemetryMailer Telemetry) (*BlockState, error) { bs := &BlockState{ bt: blocktree.NewBlockTreeFromRoot(header), baseState: NewBaseState(db), - db: chaindb.NewTable(db, blockPrefix), + db: database.NewTable(db, blockPrefix), unfinalisedBlocks: newHashToBlockMap(), tries: trs, imported: make(map[chan *types.Block]struct{}), @@ -211,7 +212,7 @@ func (bs *BlockState) GetHeader(hash common.Hash) (header *types.Header, err err } if has, _ := bs.HasHeader(hash); !has { - return nil, chaindb.ErrKeyNotFound + return nil, pebble.ErrNotFound } data, err := bs.db.Get(headerKey(hash)) @@ -226,7 +227,7 @@ func (bs *BlockState) GetHeader(hash common.Hash) (header *types.Header, err err } if result.Empty() { - return nil, chaindb.ErrKeyNotFound + return nil, pebble.ErrNotFound } result.Hash() @@ -644,7 +645,7 @@ func (bs *BlockState) Range(startHash, endHash common.Hash) (hashes []common.Has } endHeader, err := bs.loadHeaderFromDatabase(endHash) - if errors.Is(err, chaindb.ErrKeyNotFound) || + if errors.Is(err, pebble.ErrNotFound) || errors.Is(err, ErrEmptyHeader) { // end hash is not in the database so we should lookup the // block that could be in memory and in the database as well diff --git a/dot/state/block_race_test.go b/dot/state/block_race_test.go index 18a1c26fdc..3f1ace26bd 100644 --- a/dot/state/block_race_test.go +++ 
b/dot/state/block_race_test.go @@ -9,10 +9,10 @@ import ( "testing" "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/lib/trie" "github.com/golang/mock/gomock" - "github.com/ChainSafe/chaindb" "github.com/stretchr/testify/require" ) @@ -22,7 +22,7 @@ func TestConcurrencySetHeader(t *testing.T) { telemetryMock.EXPECT().SendMessage(gomock.Any()).AnyTimes() threads := runtime.NumCPU() - dbs := make([]*chaindb.BadgerDB, threads) + dbs := make([]database.Database, threads) for i := 0; i < threads; i++ { dbs[i] = NewInMemoryDB(t) } diff --git a/dot/state/block_test.go b/dot/state/block_test.go index f4b9cf7f23..6e91d9e668 100644 --- a/dot/state/block_test.go +++ b/dot/state/block_test.go @@ -8,12 +8,12 @@ import ( "testing" "time" - "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/blocktree" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/trie" "github.com/ChainSafe/gossamer/pkg/scale" + "github.com/cockroachdb/pebble" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" @@ -964,7 +964,7 @@ func TestRange(t *testing.T) { "start_hash_in_memory_while_end_hash_in_database": { blocksToCreate: 128, blocksToPersistAtDisk: 64, - wantErr: chaindb.ErrKeyNotFound, + wantErr: pebble.ErrNotFound, stringErr: "range start should be in database: " + "querying database: Key not found", newBlockState: func(t *testing.T, ctrl *gomock.Controller, diff --git a/dot/state/epoch.go b/dot/state/epoch.go index 6dc18546b2..89e5c1d461 100644 --- a/dot/state/epoch.go +++ b/dot/state/epoch.go @@ -10,10 +10,11 @@ import ( "sync" "time" - 
"github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/pkg/scale" + "github.com/cockroachdb/pebble" ) var ( @@ -67,7 +68,7 @@ type EpochState struct { } // NewEpochStateFromGenesis returns a new EpochState given information for the first epoch, fetched from the runtime -func NewEpochStateFromGenesis(db *chaindb.BadgerDB, blockState *BlockState, +func NewEpochStateFromGenesis(db database.Database, blockState *BlockState, genesisConfig *types.BabeConfiguration) (*EpochState, error) { baseState := NewBaseState(db) @@ -76,7 +77,7 @@ func NewEpochStateFromGenesis(db *chaindb.BadgerDB, blockState *BlockState, return nil, err } - epochDB := chaindb.NewTable(db, epochPrefix) + epochDB := database.NewTable(db, epochPrefix) err = epochDB.Put(currentEpochKey, []byte{0, 0, 0, 0, 0, 0, 0, 0}) if err != nil { return nil, err @@ -133,7 +134,7 @@ func NewEpochStateFromGenesis(db *chaindb.BadgerDB, blockState *BlockState, } // NewEpochState returns a new EpochState -func NewEpochState(db *chaindb.BadgerDB, blockState *BlockState) (*EpochState, error) { +func NewEpochState(db database.Database, blockState *BlockState) (*EpochState, error) { baseState := NewBaseState(db) epochLength, err := baseState.loadEpochLength() @@ -149,7 +150,7 @@ func NewEpochState(db *chaindb.BadgerDB, blockState *BlockState) (*EpochState, e return &EpochState{ baseState: baseState, blockState: blockState, - db: chaindb.NewTable(db, epochPrefix), + db: database.NewTable(db, epochPrefix), epochLength: epochLength, skipToEpoch: skipToEpoch, nextEpochData: make(nextEpochMap[types.NextEpochData]), @@ -252,7 +253,7 @@ func (s *EpochState) SetEpochData(epoch uint64, info *types.EpochData) error { // if the header params is nil then it will search only in database func (s 
*EpochState) GetEpochData(epoch uint64, header *types.Header) (*types.EpochData, error) { epochData, err := s.getEpochDataFromDatabase(epoch) - if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) { + if err != nil && !errors.Is(err, pebble.ErrNotFound) { return nil, fmt.Errorf("failed to retrieve epoch data from database: %w", err) } @@ -335,7 +336,7 @@ func (s *EpochState) setLatestConfigData(epoch uint64) error { func (s *EpochState) GetConfigData(epoch uint64, header *types.Header) (configData *types.ConfigData, err error) { for tryEpoch := int(epoch); tryEpoch >= 0; tryEpoch-- { configData, err = s.getConfigDataFromDatabase(uint64(tryEpoch)) - if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) { + if err != nil && !errors.Is(err, pebble.ErrNotFound) { return nil, fmt.Errorf("failed to retrieve config epoch from database: %w", err) } @@ -445,7 +446,7 @@ func (nem nextEpochMap[T]) Retrieve(blockState *BlockState, epoch uint64, header // sometimes while moving to the next epoch is possible the header // is not fully imported by the blocktree, in this case we will use // its parent header which migth be already imported. 
- if errors.Is(err, chaindb.ErrKeyNotFound) { + if errors.Is(err, pebble.ErrNotFound) { parentHeader, err := blockState.GetHeader(header.ParentHash) if err != nil { return nil, fmt.Errorf("cannot get parent header: %w", err) @@ -588,9 +589,9 @@ func (s *EpochState) FinalizeBABENextEpochData(finalizedHeader *types.Header) er epochInDatabase, err := s.getEpochDataFromDatabase(nextEpoch) - // if an error occurs and the error is chaindb.ErrKeyNotFound we ignore + // if an error occurs and the error is pebble.ErrNotFound we ignore // since this error is what we will handle in the next lines - if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) { + if err != nil && !errors.Is(err, pebble.ErrNotFound) { return fmt.Errorf("cannot check if next epoch data is already defined for epoch %d: %w", nextEpoch, err) } @@ -649,9 +650,9 @@ func (s *EpochState) FinalizeBABENextConfigData(finalizedHeader *types.Header) e configInDatabase, err := s.getConfigDataFromDatabase(nextEpoch) - // if an error occurs and the error is chaindb.ErrKeyNotFound we ignore + // if an error occurs and the error is pebble.ErrNotFound we ignore // since this error is what we will handle in the next lines - if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) { + if err != nil && !errors.Is(err, pebble.ErrNotFound) { return fmt.Errorf("cannot check if next epoch config is already defined for epoch %d: %w", nextEpoch, err) } diff --git a/dot/state/grandpa.go b/dot/state/grandpa.go index 68c5ee9d8f..fa4780aba6 100644 --- a/dot/state/grandpa.go +++ b/dot/state/grandpa.go @@ -9,11 +9,12 @@ import ( "fmt" "strconv" - "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/pkg/scale" + 
"github.com/cockroachdb/pebble" ) var ( @@ -46,9 +47,9 @@ type GrandpaState struct { } // NewGrandpaStateFromGenesis returns a new GrandpaState given the grandpa genesis authorities -func NewGrandpaStateFromGenesis(db *chaindb.BadgerDB, bs *BlockState, +func NewGrandpaStateFromGenesis(db database.Database, bs *BlockState, genesisAuthorities []types.GrandpaVoter, telemetry Telemetry) (*GrandpaState, error) { - grandpaDB := chaindb.NewTable(db, grandpaPrefix) + grandpaDB := database.NewTable(db, grandpaPrefix) s := &GrandpaState{ db: grandpaDB, blockState: bs, @@ -77,9 +78,9 @@ func NewGrandpaStateFromGenesis(db *chaindb.BadgerDB, bs *BlockState, } // NewGrandpaState returns a new GrandpaState -func NewGrandpaState(db *chaindb.BadgerDB, bs *BlockState, telemetry Telemetry) *GrandpaState { +func NewGrandpaState(db database.Database, bs *BlockState, telemetry Telemetry) *GrandpaState { return &GrandpaState{ - db: chaindb.NewTable(db, grandpaPrefix), + db: database.NewTable(db, grandpaPrefix), blockState: bs, scheduledChangeRoots: new(changeTree), forcedChanges: new(orderedPendingChanges), @@ -459,7 +460,7 @@ func (s *GrandpaState) GetSetIDByBlockNumber(blockNumber uint) (uint64, error) { for { changeUpper, err := s.GetSetIDChange(curr + 1) - if errors.Is(err, chaindb.ErrKeyNotFound) { + if errors.Is(err, pebble.ErrNotFound) { if curr == 0 { return 0, nil } @@ -502,7 +503,7 @@ func (s *GrandpaState) SetNextPause(number uint) error { } // GetNextPause returns the block number of the next grandpa pause. -// If the key is not found in the database, the error chaindb.ErrKeyNotFound +// If the key is not found in the database, the error pebble.ErrNotFound // is returned. func (s *GrandpaState) GetNextPause() (blockNumber uint, err error) { value, err := s.db.Get(pauseKey) @@ -520,7 +521,7 @@ func (s *GrandpaState) SetNextResume(number uint) error { } // GetNextResume returns the block number of the next grandpa resume. 
-// If the key is not found in the database, the error chaindb.ErrKeyNotFound +// If the key is not found in the database, the error pebble.ErrNotFound // is returned. func (s *GrandpaState) GetNextResume() (blockNumber uint, err error) { value, err := s.db.Get(resumeKey) diff --git a/dot/state/grandpa_test.go b/dot/state/grandpa_test.go index 53b3c5d1b8..3d09c98e59 100644 --- a/dot/state/grandpa_test.go +++ b/dot/state/grandpa_test.go @@ -7,9 +7,9 @@ import ( "fmt" "testing" - "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/crypto" "github.com/ChainSafe/gossamer/lib/crypto/ed25519" @@ -124,7 +124,7 @@ func TestGrandpaState_LatestRound(t *testing.T) { require.Equal(t, uint64(99), r) } -func testBlockState(t *testing.T, db *chaindb.BadgerDB) *BlockState { +func testBlockState(t *testing.T, db database.Database) *BlockState { ctrl := gomock.NewController(t) telemetryMock := NewMockTelemetry(ctrl) telemetryMock.EXPECT().SendMessage(gomock.AssignableToTypeOf(&telemetry.NotifyFinalized{})) diff --git a/dot/state/initialize.go b/dot/state/initialize.go index 5c923efbbf..ad627cd8b7 100644 --- a/dot/state/initialize.go +++ b/dot/state/initialize.go @@ -7,8 +7,8 @@ import ( "fmt" "path/filepath" - "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/genesis" "github.com/ChainSafe/gossamer/lib/runtime" @@ -37,11 +37,7 @@ func (s *Service) Initialise(gen *genesis.Genesis, header *types.Header, t *trie s.db = db - 
if err = db.ClearAll(); err != nil { - return fmt.Errorf("failed to clear database: %s", err) - } - - if err = t.WriteDirty(chaindb.NewTable(db, storagePrefix)); err != nil { + if err = t.WriteDirty(database.NewTable(db, storagePrefix)); err != nil { return fmt.Errorf("failed to write genesis trie to database: %w", err) } @@ -137,7 +133,7 @@ func loadGrandpaAuthorities(t *trie.Trie) ([]types.GrandpaVoter, error) { // storeInitialValues writes initial genesis values to the state database func (s *Service) storeInitialValues(data *genesis.Data, t *trie.Trie) error { // write genesis trie to database - if err := t.WriteDirty(chaindb.NewTable(s.db, storagePrefix)); err != nil { + if err := t.WriteDirty(database.NewTable(s.db, storagePrefix)); err != nil { return fmt.Errorf("failed to write trie to database: %s", err) } diff --git a/dot/state/interfaces.go b/dot/state/interfaces.go index bae7631e51..f57caec2db 100644 --- a/dot/state/interfaces.go +++ b/dot/state/interfaces.go @@ -6,8 +6,8 @@ package state import ( "encoding/json" - "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/internal/database" ) // GetPutDeleter has methods to get, put and delete key values. @@ -58,7 +58,7 @@ type Haser interface { // NewBatcher creates a new database batch. type NewBatcher interface { - NewBatch() chaindb.Batch + NewBatch() database.Batch } // BabeConfigurer returns the babe configuration of the runtime. 
diff --git a/dot/state/mocks_test.go b/dot/state/mocks_test.go index 1ca5e3196a..70617922a3 100644 --- a/dot/state/mocks_test.go +++ b/dot/state/mocks_test.go @@ -8,7 +8,7 @@ import ( json "encoding/json" reflect "reflect" - chaindb "github.com/ChainSafe/chaindb" + database "github.com/ChainSafe/gossamer/internal/database" gomock "github.com/golang/mock/gomock" ) @@ -115,10 +115,10 @@ func (mr *MockBlockStateDatabaseMockRecorder) Has(arg0 interface{}) *gomock.Call } // NewBatch mocks base method. -func (m *MockBlockStateDatabase) NewBatch() chaindb.Batch { +func (m *MockBlockStateDatabase) NewBatch() database.Batch { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewBatch") - ret0, _ := ret[0].(chaindb.Batch) + ret0, _ := ret[0].(database.Batch) return ret0 } diff --git a/dot/state/offline_pruner.go b/dot/state/offline_pruner.go index f2104694c9..7eb8e95b4f 100644 --- a/dot/state/offline_pruner.go +++ b/dot/state/offline_pruner.go @@ -5,15 +5,15 @@ package state import ( "bytes" + "errors" "fmt" "os" - "github.com/ChainSafe/chaindb" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/trie" "github.com/ChainSafe/gossamer/lib/utils" - "github.com/dgraph-io/badger/v2" - "github.com/dgraph-io/badger/v2/pb" + "github.com/cockroachdb/pebble" ) // OfflinePruner is a tool to prune the stale state with the help of @@ -21,10 +21,10 @@ import ( // - iterate the storage state, reconstruct the relevant state tries // - iterate the database, stream all the targeted keys to new DB type OfflinePruner struct { - inputDB *chaindb.BadgerDB + inputDB database.Database storageState *StorageState blockState *BlockState - filterDatabase *chaindb.BadgerDB + filterDatabase database.Database bestBlockHash common.Hash retainBlockNum 
uint32 @@ -66,10 +66,7 @@ func NewOfflinePruner(inputDBPath string, } }() - filterDatabaseOptions := &chaindb.Config{ - DataDir: filterDatabaseDir, - } - filterDatabase, err := chaindb.NewBadgerDB(filterDatabaseOptions) + filterDatabase, err := database.NewPebble(filterDatabaseDir, false) if err != nil { - return nil, fmt.Errorf("creating badger filter database: %w", err) + return nil, fmt.Errorf("creating pebble filter database: %w", err) } @@ -155,7 +152,7 @@ func (p *OfflinePruner) SetBloomFilter() (err error) { // Prune starts streaming the data from input db to the pruned db. func (p *OfflinePruner) Prune() error { - inputDB, err := utils.LoadBadgerDB(p.inputDBPath) + inputDB, err := pebble.Open(p.inputDBPath, &pebble.Options{}) if err != nil { - return fmt.Errorf("failed to load DB %w", err) + return fmt.Errorf("failed to load DB: %w", err) } @@ -172,33 +169,48 @@ func (p *OfflinePruner) Prune() error { }() storagePrefixBytes := []byte(storagePrefix) - stream := inputDB.NewStream() - stream.ChooseKey = func(item *badger.Item) bool { - key := item.Key() - if !bytes.HasPrefix(key, storagePrefixBytes) { - // Ignore non-storage keys - return false + + keyUpperBound := func(b []byte) []byte { + end := make([]byte, len(b)) + copy(end, b) + for i := len(end) - 1; i >= 0; i-- { + end[i] = end[i] + 1 + if end[i] != 0 { + return end[:i+1] + } } + return nil // no upper-bound + } + + prefixIterOptions := &pebble.IterOptions{ + LowerBound: storagePrefixBytes, + UpperBound: keyUpperBound(storagePrefixBytes), + } + + // Ignore non-storage keys + inputDBIter := inputDB.NewIter(prefixIterOptions) + writeBatch := inputDB.NewBatch() + for inputDBIter.First(); inputDBIter.Valid(); inputDBIter.Next() { + key := inputDBIter.Key() // Storage keys not found in filter database are deleted. 
nodeHash := bytes.TrimPrefix(key, storagePrefixBytes) _, err := p.filterDatabase.Get(nodeHash) - return err == nil - } - - writeBatch := inputDB.NewWriteBatch() - stream.Send = func(l *pb.KVList) error { - keyValues := l.GetKv() - for _, keyValue := range keyValues { - err = writeBatch.Delete(keyValue.Key) - if err != nil { - return err + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + continue } + + return fmt.Errorf("checking filter database: %w", err) + } + + err = writeBatch.Delete(key, &pebble.WriteOptions{}) + if err != nil { + return fmt.Errorf("adding key to the delete batch: %w", err) } - return nil } - err = writeBatch.Flush() + err = writeBatch.Commit(pebble.Sync) if err != nil { - return fmt.Errorf("flushing write batch: %w", err) + return fmt.Errorf("committing write batch: %w", err) } diff --git a/dot/state/service.go b/dot/state/service.go index 674930f922..def8e2fcd1 100644 --- a/dot/state/service.go +++ b/dot/state/service.go @@ -9,13 +9,12 @@ import ( "github.com/ChainSafe/gossamer/dot/state/pruner" "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/internal/metrics" "github.com/ChainSafe/gossamer/lib/blocktree" "github.com/ChainSafe/gossamer/lib/trie" "github.com/ChainSafe/gossamer/lib/utils" - - "github.com/ChainSafe/chaindb" ) var logger = log.NewFromGlobal( @@ -26,7 +25,7 @@ var logger = log.NewFromGlobal( type Service struct { dbPath string logLvl log.Level - db *chaindb.BadgerDB + db database.Database isMemDB bool // set to true if using an in-memory database; only used for testing. 
Base *BaseState Storage *StorageState @@ -79,7 +78,7 @@ func (s *Service) UseMemDB() { } // DB returns the Service's database -func (s *Service) DB() *chaindb.BadgerDB { +func (s *Service) DB() database.Database { return s.db } @@ -269,11 +268,11 @@ func (s *Service) Import(header *types.Header, t *trie.Trie, firstSlot uint64) e } block := &BlockState{ - db: chaindb.NewTable(s.db, blockPrefix), + db: database.NewTable(s.db, blockPrefix), } storage := &StorageState{ - db: chaindb.NewTable(s.db, storagePrefix), + db: database.NewTable(s.db, storagePrefix), } epoch, err := NewEpochState(s.db, block) diff --git a/dot/state/service_integration_test.go b/dot/state/service_integration_test.go index 6ee2f9aa81..863705b9da 100644 --- a/dot/state/service_integration_test.go +++ b/dot/state/service_integration_test.go @@ -16,9 +16,9 @@ import ( "github.com/ChainSafe/gossamer/internal/log" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/trie" + "github.com/cockroachdb/pebble" "github.com/golang/mock/gomock" - "github.com/ChainSafe/chaindb" "github.com/stretchr/testify/require" ) @@ -219,7 +219,7 @@ func TestService_StorageTriePruning(t *testing.T) { require.NoError(t, err, fmt.Sprintf("Got error for block %d", b.Header.Number)) continue } - require.ErrorIs(t, err, chaindb.ErrKeyNotFound, fmt.Sprintf("Expected error for block %d", b.Header.Number)) + require.ErrorIs(t, err, pebble.ErrNotFound, fmt.Sprintf("Expected error for block %d", b.Header.Number)) } } @@ -356,10 +356,10 @@ func TestService_Rewind(t *testing.T) { require.NoError(t, err) _, err = serv.Grandpa.GetSetIDChange(2) - require.Equal(t, chaindb.ErrKeyNotFound, err) + require.Equal(t, pebble.ErrNotFound, err) _, err = serv.Grandpa.GetSetIDChange(3) - require.Equal(t, chaindb.ErrKeyNotFound, err) + require.Equal(t, pebble.ErrNotFound, err) } func TestService_Import(t 
*testing.T) { diff --git a/dot/state/slot.go b/dot/state/slot.go index 9ffdc0ad1d..9e8caf67de 100644 --- a/dot/state/slot.go +++ b/dot/state/slot.go @@ -9,9 +9,10 @@ import ( "errors" "fmt" - "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/pkg/scale" + "github.com/cockroachdb/pebble" ) const slotTablePrefix = "slot" @@ -28,11 +29,11 @@ var ( ) type SlotState struct { - db chaindb.Database + db database.Table } -func NewSlotState(db *chaindb.BadgerDB) *SlotState { - slotStateDB := chaindb.NewTable(db, slotTablePrefix) +func NewSlotState(db database.Database) *SlotState { + slotStateDB := database.NewTable(db, slotTablePrefix) return &SlotState{ db: slotStateDB, @@ -57,7 +58,7 @@ func (s *SlotState) CheckEquivocation(slotNow, slot uint64, header *types.Header currentSlotKey := bytes.Join([][]byte{slotHeaderMapKey, slotEncoded[:]}, nil) encodedHeadersWithSigners, err := s.db.Get(currentSlotKey) - if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) { + if err != nil && !errors.Is(err, pebble.ErrNotFound) { return nil, fmt.Errorf("getting key slot header map key %d: %w", slot, err) } @@ -89,7 +90,7 @@ func (s *SlotState) CheckEquivocation(slotNow, slot uint64, header *types.Header firstSavedSlot := slot firstSavedSlotEncoded, err := s.db.Get(slotHeaderStartKey) - if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) { + if err != nil && !errors.Is(err, pebble.ErrNotFound) { return nil, fmt.Errorf("getting key slot header start key: %w", err) } diff --git a/dot/state/slot_test.go b/dot/state/slot_test.go index e7f1eea43e..300b32fd1b 100644 --- a/dot/state/slot_test.go +++ b/dot/state/slot_test.go @@ -11,11 +11,12 @@ import ( "io" "testing" - "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/types" + 
"github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/crypto/sr25519" "github.com/ChainSafe/gossamer/lib/keystore" + "github.com/cockroachdb/pebble" "github.com/minio/sha256-simd" "github.com/stretchr/testify/require" ) @@ -41,7 +42,7 @@ func createHeader(t *testing.T, n uint) (header *types.Header) { return header } -func checkSlotToMapKeyExists(t *testing.T, db chaindb.Database, slotNumber uint64) bool { +func checkSlotToMapKeyExists(t *testing.T, db database.Table, slotNumber uint64) bool { t.Helper() slotEncoded := make([]byte, 8) @@ -51,7 +52,7 @@ func checkSlotToMapKeyExists(t *testing.T, db chaindb.Database, slotNumber uint6 _, err := db.Get(slotToHeaderKey) if err != nil { - if errors.Is(err, chaindb.ErrKeyNotFound) { + if errors.Is(err, pebble.ErrNotFound) { return false } @@ -62,10 +63,7 @@ func checkSlotToMapKeyExists(t *testing.T, db chaindb.Database, slotNumber uint6 } func Test_checkEquivocation(t *testing.T) { - inMemoryDB, err := chaindb.NewBadgerDB(&chaindb.Config{ - DataDir: t.TempDir(), - InMemory: true, - }) + inMemoryDB, err := database.NewPebble(t.TempDir(), true) require.NoError(t, err) kr, err := keystore.NewSr25519Keyring() diff --git a/dot/state/storage.go b/dot/state/storage.go index efaba3acff..b7104b2c68 100644 --- a/dot/state/storage.go +++ b/dot/state/storage.go @@ -8,9 +8,9 @@ import ( "fmt" "sync" - "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/state/pruner" "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/lib/common" rtstorage "github.com/ChainSafe/gossamer/lib/runtime/storage" "github.com/ChainSafe/gossamer/lib/trie" @@ -44,9 
+44,9 @@ type StorageState struct { // NewStorageState creates a new StorageState backed by the given block state // and database located at basePath. -func NewStorageState(db *chaindb.BadgerDB, blockState *BlockState, +func NewStorageState(db database.Database, blockState *BlockState, tries *Tries) (*StorageState, error) { - storageTable := chaindb.NewTable(db, storagePrefix) + storageTable := database.NewTable(db, storagePrefix) return &StorageState{ blockState: blockState, diff --git a/dot/state/storage_notify_test.go b/dot/state/storage_notify_test.go index 4751e6137f..564458dc7c 100644 --- a/dot/state/storage_notify_test.go +++ b/dot/state/storage_notify_test.go @@ -4,16 +4,11 @@ package state import ( - "context" - "fmt" - "log" "sync" "testing" "time" - "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/lib/common" - "github.com/dgraph-io/badger/v4/pb" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" ) @@ -135,63 +130,3 @@ func TestStorageState_RegisterStorageObserver_Multi_Filter(t *testing.T) { ss.UnregisterStorageObserver(observer) } } - -func Test_Example(t *testing.T) { - // this is a working example of how to use db.Subscribe taken from - // https://github.com/dgraph-io/badger/blob/f50343ff404d8198df6dc83755ec2eab863d5ff2/db_test.go#L1939-L1948 - prefix := []byte{'a'} - match := []pb.Match{ - { - Prefix: prefix, - }, - } - - // This key should be printed, since it matches the prefix. - aKey := []byte("a-key") - aValue := []byte("a-value") - - // This key should not be printed. - bKey := []byte("b-key") - bValue := []byte("b-value") - - // Open the DB. - db := NewInMemoryDB(t) - - // Create the context here so we can cancel it after sending the writes. - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Use the WaitGroup to make sure we wait for the subscription to stop before continuing. 
- var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - cb := func(kvs *chaindb.KVList) error { - for _, kv := range kvs.Kv { - fmt.Printf("%s is now set to %s\n", kv.Key, kv.Value) - } - return nil - } - - if err := db.Subscribe(ctx, cb, match); err != nil && err != context.Canceled { - log.Fatal(err) - } - log.Printf("subscription closed") - }() - - // Write both keys, but only one should be printed in the Output. - err := db.Put(aKey, aValue) - if err != nil { - log.Fatal(err) - } - err = db.Put(bKey, bValue) - if err != nil { - log.Fatal(err) - } - log.Printf("stopping subscription") - cancel() - log.Printf("waiting for subscription to close") - wg.Wait() - // Output: - // a-key is now set to a-value -} diff --git a/dot/state/test_helpers.go b/dot/state/test_helpers.go index 82c2d60858..afa2ebe0b9 100644 --- a/dot/state/test_helpers.go +++ b/dot/state/test_helpers.go @@ -10,8 +10,8 @@ import ( "testing" "time" - "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/lib/common" runtime "github.com/ChainSafe/gossamer/lib/runtime/storage" "github.com/ChainSafe/gossamer/lib/trie" @@ -24,7 +24,7 @@ import ( var inc, _ = time.ParseDuration("1s") // NewInMemoryDB creates a new in-memory database -func NewInMemoryDB(t *testing.T) *chaindb.BadgerDB { +func NewInMemoryDB(t *testing.T) database.Database { testDatadirPath := t.TempDir() db, err := utils.SetupDatabase(testDatadirPath, true) diff --git a/dot/sync/benchmark.go b/dot/sync/benchmark.go deleted file mode 100644 index 018cb8b1e4..0000000000 --- a/dot/sync/benchmark.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "container/ring" - "time" -) - -type syncBenchmarker struct { - start time.Time - 
startBlock uint - blocksPerSecond *ring.Ring - samplesToKeep int -} - -func newSyncBenchmarker(samplesToKeep int) *syncBenchmarker { - if samplesToKeep == 0 { - panic("cannot have 0 samples to keep") - } - - return &syncBenchmarker{ - blocksPerSecond: ring.New(samplesToKeep), - samplesToKeep: samplesToKeep, - } -} - -func (b *syncBenchmarker) begin(now time.Time, block uint) { - b.start = now - b.startBlock = block -} - -func (b *syncBenchmarker) end(now time.Time, block uint) { - duration := now.Sub(b.start) - blocks := block - b.startBlock - bps := float64(blocks) / duration.Seconds() - b.blocksPerSecond.Value = bps - b.blocksPerSecond = b.blocksPerSecond.Next() -} - -func (b *syncBenchmarker) average() float64 { - var sum float64 - var elementsSet int - b.blocksPerSecond.Do(func(x interface{}) { - if x == nil { - return - } - bps := x.(float64) - sum += bps - elementsSet++ - }) - - if elementsSet == 0 { - return 0 - } - - return sum / float64(elementsSet) -} - -func (b *syncBenchmarker) mostRecentAverage() float64 { - value := b.blocksPerSecond.Prev().Value - if value == nil { - return 0 - } - return value.(float64) -} diff --git a/dot/sync/benchmark_test.go b/dot/sync/benchmark_test.go deleted file mode 100644 index eae329300e..0000000000 --- a/dot/sync/benchmark_test.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2022 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "container/ring" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func Test_newSyncBenchmarker(t *testing.T) { - t.Parallel() - - t.Run("10_samples_to_keep", func(t *testing.T) { - t.Parallel() - const samplesToKeep = 10 - actual := newSyncBenchmarker(samplesToKeep) - - expected := &syncBenchmarker{ - blocksPerSecond: ring.New(samplesToKeep), - samplesToKeep: samplesToKeep, - } - - assert.Equal(t, expected, actual) - }) - - t.Run("panics_on_0_sample_to_keep", func(t *testing.T) { - t.Parallel() - const 
samplesToKeep = 0 - assert.PanicsWithValue(t, "cannot have 0 samples to keep", func() { - newSyncBenchmarker(samplesToKeep) - }) - }) -} - -func Test_syncBenchmarker_begin(t *testing.T) { - t.Parallel() - - const startSec = 1000 - start := time.Unix(startSec, 0) - const startBlock = 10 - - b := syncBenchmarker{} - b.begin(start, startBlock) - - expected := syncBenchmarker{ - start: start, - startBlock: startBlock, - } - - assert.Equal(t, expected, b) -} - -func Test_syncBenchmarker_end(t *testing.T) { - t.Parallel() - - const startSec = 1000 - start := time.Unix(startSec, 0) - - const nowSec = 1010 - now := time.Unix(nowSec, 0) - - const ( - startBlock = 10 - endBlock = 12 - ) - - const ringCap = 3 - - blocksPerSecond := ring.New(ringCap) - blocksPerSecond.Value = 1.00 - blocksPerSecond = blocksPerSecond.Next() - - b := syncBenchmarker{ - start: start, - startBlock: startBlock, - blocksPerSecond: blocksPerSecond, - } - b.end(now, endBlock) - - expectedBlocksPerSecond := ring.New(ringCap) - expectedBlocksPerSecond.Value = 1.00 - expectedBlocksPerSecond = expectedBlocksPerSecond.Next() - expectedBlocksPerSecond.Value = 0.2 - expectedBlocksPerSecond = expectedBlocksPerSecond.Next() - - expected := syncBenchmarker{ - start: start, - startBlock: startBlock, - blocksPerSecond: expectedBlocksPerSecond, - } - - assert.Equal(t, expected, b) -} - -func Test_syncBenchmarker_average(t *testing.T) { - t.Parallel() - - testCases := map[string]struct { - values []float64 - ringCap int - average float64 - }{ - // zero size ring is not possible due to constructor check - "empty_ring": { - ringCap: 1, - }, - "single_element_in_one-size_ring": { - values: []float64{1.1}, - ringCap: 1, - average: 1.1, - }, - "single_element_in_two-size_ring": { - values: []float64{1.1}, - ringCap: 2, - average: 1.1, - }, - "two_elements_in_two-size_ring": { - values: []float64{1.0, 2.0}, - ringCap: 2, - average: 1.5, - }, - } - - for name, testCase := range testCases { - testCase := testCase - 
t.Run(name, func(t *testing.T) { - t.Parallel() - - blocksPerSecond := ring.New(testCase.ringCap) - for _, value := range testCase.values { - blocksPerSecond.Value = value - blocksPerSecond = blocksPerSecond.Next() - } - - benchmarker := syncBenchmarker{ - blocksPerSecond: blocksPerSecond, - samplesToKeep: testCase.ringCap, - } - - avg := benchmarker.average() - - assert.Equal(t, testCase.average, avg) - }) - } -} - -func Test_syncBenchmarker_mostRecentAverage(t *testing.T) { - t.Parallel() - - testCases := map[string]struct { - values []float64 - ringCap int - average float64 - }{ - // zero size ring is not possible due to constructor check - "empty_ring": { - ringCap: 1, - }, - "single_element_in_one-size_ring": { - values: []float64{1.1}, - ringCap: 1, - average: 1.1, - }, - "single_element_in_two-size_ring": { - values: []float64{1.1}, - ringCap: 2, - average: 1.1, - }, - "two_elements_in_two-size_ring": { - values: []float64{1.0, 2.0}, - ringCap: 2, - average: 2.0, - }, - "three_elements_in_two-size_ring": { - values: []float64{1.0, 2.0, 3.0}, - ringCap: 2, - average: 3.0, - }, - } - - for name, testCase := range testCases { - testCase := testCase - t.Run(name, func(t *testing.T) { - t.Parallel() - - blocksPerSecond := ring.New(testCase.ringCap) - for _, value := range testCase.values { - blocksPerSecond.Value = value - blocksPerSecond = blocksPerSecond.Next() - } - - benchmarker := syncBenchmarker{ - blocksPerSecond: blocksPerSecond, - } - - avg := benchmarker.mostRecentAverage() - - assert.Equal(t, testCase.average, avg) - }) - } -} - -func Test_syncBenchmarker(t *testing.T) { - t.Parallel() - - const samplesToKeep = 5 - benchmarker := newSyncBenchmarker(samplesToKeep) - - const initialBlock = 10 - timeZero := time.Unix(0, 0) - const timeIncrement = time.Second - const baseBlocksIncrement uint = 1 - - startTime := timeZero - endTime := startTime.Add(timeIncrement) - var block uint = initialBlock - - const samples = 10 - for i := 0; i < samples; i++ { - 
benchmarker.begin(startTime, block) - block += baseBlocksIncrement + uint(i) - benchmarker.end(endTime, block) - - startTime = startTime.Add(timeIncrement) - endTime = startTime.Add(timeIncrement) - } - - avg := benchmarker.average() - const expectedAvg = 8.0 - assert.Equal(t, expectedAvg, avg) - - mostRecentAvg := benchmarker.mostRecentAverage() - const expectedMostRecentAvg = 10.0 - assert.Equal(t, expectedMostRecentAvg, mostRecentAvg) -} diff --git a/dot/sync/block_queue.go b/dot/sync/block_queue.go deleted file mode 100644 index 9b5a81d597..0000000000 --- a/dot/sync/block_queue.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "context" - "sync" - - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" -) - -type blockQueue struct { - queue chan *types.BlockData - hashesSet map[common.Hash]struct{} - hashesSetMutex sync.RWMutex -} - -// newBlockQueue initialises a queue of *types.BlockData with the given capacity. -func newBlockQueue(capacity int) *blockQueue { - return &blockQueue{ - queue: make(chan *types.BlockData, capacity), - hashesSet: make(map[common.Hash]struct{}, capacity), - } -} - -// push pushes an item into the queue. It blocks if the queue is at capacity. -func (bq *blockQueue) push(blockData *types.BlockData) { - bq.hashesSetMutex.Lock() - bq.hashesSet[blockData.Hash] = struct{}{} - bq.hashesSetMutex.Unlock() - - bq.queue <- blockData -} - -// pop pops the next item from the queue. It blocks if the queue is empty -// until the context is cancelled. If the context is canceled, it returns -// the error from the context. 
-func (bq *blockQueue) pop(ctx context.Context) (blockData *types.BlockData, err error) { - select { - case <-ctx.Done(): - return blockData, ctx.Err() - case blockData = <-bq.queue: - } - bq.hashesSetMutex.Lock() - delete(bq.hashesSet, blockData.Hash) - bq.hashesSetMutex.Unlock() - return blockData, nil -} - -func (bq *blockQueue) has(blockHash common.Hash) (has bool) { - bq.hashesSetMutex.RLock() - defer bq.hashesSetMutex.RUnlock() - _, has = bq.hashesSet[blockHash] - return has -} diff --git a/dot/sync/block_queue_test.go b/dot/sync/block_queue_test.go deleted file mode 100644 index cff9b181b3..0000000000 --- a/dot/sync/block_queue_test.go +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func Test_newBlockQueue(t *testing.T) { - t.Parallel() - - const capacity = 1 - bq := newBlockQueue(capacity) - - require.NotNil(t, bq.queue) - assert.Equal(t, 1, cap(bq.queue)) - assert.Equal(t, 0, len(bq.queue)) - bq.queue = nil - - expectedBlockQueue := &blockQueue{ - hashesSet: make(map[common.Hash]struct{}, capacity), - } - assert.Equal(t, expectedBlockQueue, bq) -} - -func Test_blockQueue_push(t *testing.T) { - t.Parallel() - - const capacity = 1 - bq := newBlockQueue(capacity) - blockData := &types.BlockData{ - Hash: common.Hash{1}, - } - - bq.push(blockData) - - // cannot compare channels - require.NotNil(t, bq.queue) - assert.Len(t, bq.queue, 1) - - receivedBlockData := <-bq.queue - expectedBlockData := &types.BlockData{ - Hash: common.Hash{1}, - } - assert.Equal(t, expectedBlockData, receivedBlockData) - - bq.queue = nil - expectedBlockQueue := &blockQueue{ - hashesSet: 
map[common.Hash]struct{}{{1}: {}}, - } - assert.Equal(t, expectedBlockQueue, bq) -} - -func Test_blockQueue_pop(t *testing.T) { - t.Parallel() - - t.Run("context_canceled", func(t *testing.T) { - t.Parallel() - - ctx, cancel := context.WithCancel(context.Background()) - cancel() - - const capacity = 1 - bq := newBlockQueue(capacity) - - blockData, err := bq.pop(ctx) - assert.Nil(t, blockData) - assert.ErrorIs(t, err, context.Canceled) - }) - - t.Run("get_block_data_after_waiting", func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - const capacity = 1 - bq := newBlockQueue(capacity) - - const afterDuration = 5 * time.Millisecond - time.AfterFunc(afterDuration, func() { - blockData := &types.BlockData{ - Hash: common.Hash{1}, - } - bq.push(blockData) - }) - - blockData, err := bq.pop(ctx) - - expectedBlockData := &types.BlockData{ - Hash: common.Hash{1}, - } - assert.Equal(t, expectedBlockData, blockData) - assert.NoError(t, err) - - assert.Len(t, bq.queue, 0) - bq.queue = nil - expectedBlockQueue := &blockQueue{ - hashesSet: map[common.Hash]struct{}{}, - } - assert.Equal(t, expectedBlockQueue, bq) - }) -} - -func Test_blockQueue_has(t *testing.T) { - t.Parallel() - - testCases := map[string]struct { - blockQueue *blockQueue - blockHash common.Hash - has bool - }{ - "absent": { - blockQueue: &blockQueue{ - hashesSet: map[common.Hash]struct{}{}, - }, - blockHash: common.Hash{1}, - }, - "exists": { - blockQueue: &blockQueue{ - hashesSet: map[common.Hash]struct{}{{1}: {}}, - }, - blockHash: common.Hash{1}, - has: true, - }, - } - - for name, tc := range testCases { - testCase := tc - t.Run(name, func(t *testing.T) { - t.Parallel() - - has := testCase.blockQueue.has(testCase.blockHash) - assert.Equal(t, testCase.has, has) - }) - } -} - -func Test_lockQueue_endToEnd(t *testing.T) { - t.Parallel() - - const capacity = 10 - blockQueue := newBlockQueue(capacity) - - newBlockData := func(i byte) *types.BlockData { - return &types.BlockData{ - Hash: 
common.Hash{i}, - } - } - - blockQueue.push(newBlockData(1)) - blockQueue.push(newBlockData(2)) - blockQueue.push(newBlockData(3)) - - blockData, err := blockQueue.pop(context.Background()) - assert.Equal(t, newBlockData(1), blockData) - assert.NoError(t, err) - - has := blockQueue.has(newBlockData(2).Hash) - assert.True(t, has) - has = blockQueue.has(newBlockData(3).Hash) - assert.True(t, has) - - blockQueue.push(newBlockData(4)) - - has = blockQueue.has(newBlockData(4).Hash) - assert.True(t, has) - - blockData, err = blockQueue.pop(context.Background()) - assert.Equal(t, newBlockData(2), blockData) - assert.NoError(t, err) - - // drain queue - for len(blockQueue.queue) > 0 { - <-blockQueue.queue - } -} - -func Test_lockQueue_threadSafety(t *testing.T) { - // This test consists in checking for concurrent access - // using the -race detector. - t.Parallel() - - var startWg, endWg sync.WaitGroup - ctx, cancel := context.WithCancel(context.Background()) - - const operations = 3 - const parallelism = 3 - const goroutines = parallelism * operations - startWg.Add(goroutines) - endWg.Add(goroutines) - - const testDuration = 50 * time.Millisecond - go func() { - timer := time.NewTimer(time.Hour) - startWg.Wait() - _ = timer.Reset(testDuration) - <-timer.C - cancel() - }() - - runInLoop := func(f func()) { - defer endWg.Done() - startWg.Done() - startWg.Wait() - for ctx.Err() == nil { - f() - } - } - - const capacity = 10 - blockQueue := newBlockQueue(capacity) - blockData := &types.BlockData{ - Hash: common.Hash{1}, - } - blockHash := common.Hash{1} - - endWg.Add(1) - go func() { - defer endWg.Done() - <-ctx.Done() - // Empty queue channel to make sure `push` does not block - // when the context is cancelled. 
- for len(blockQueue.queue) > 0 { - <-blockQueue.queue - } - }() - - for i := 0; i < parallelism; i++ { - go runInLoop(func() { - blockQueue.push(blockData) - }) - - go runInLoop(func() { - _, _ = blockQueue.pop(ctx) - }) - - go runInLoop(func() { - _ = blockQueue.has(blockHash) - }) - } - - endWg.Wait() -} diff --git a/dot/sync/bootstrap_syncer.go b/dot/sync/bootstrap_syncer.go deleted file mode 100644 index 4b0b1ecc39..0000000000 --- a/dot/sync/bootstrap_syncer.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "errors" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/lib/common" -) - -var _ workHandler = &bootstrapSyncer{} - -var bootstrapRequestData = network.RequestedDataHeader + network.RequestedDataBody + network.RequestedDataJustification - -// bootstrapSyncer handles worker logic for bootstrap mode -type bootstrapSyncer struct { - blockState BlockState -} - -func newBootstrapSyncer(blockState BlockState) *bootstrapSyncer { - return &bootstrapSyncer{ - blockState: blockState, - } -} - -func (s *bootstrapSyncer) handleNewPeerState(ps *peerState) (*worker, error) { - head, err := s.blockState.BestBlockHeader() - if err != nil { - return nil, err - } - - if ps.number <= head.Number { - return nil, nil //nolint:nilnil - } - - return &worker{ - startNumber: uintPtr(head.Number + 1), - targetHash: ps.hash, - targetNumber: uintPtr(ps.number), - requestData: bootstrapRequestData, - direction: network.Ascending, - }, nil -} - -//nolint:nilnil -func (s *bootstrapSyncer) handleWorkerResult(res *worker) ( - workerToRetry *worker, err error) { - // if there is an error, potentially retry the worker - if res.err == nil { - return nil, nil - } - - // new worker should update start block and re-dispatch - head, err := s.blockState.BestBlockHeader() - if err != nil { - return nil, err - } - - // we've reached the 
target, return - if *res.targetNumber <= head.Number { - return nil, nil - } - - startNumber := head.Number + 1 - - // in the case we started a block producing node, we might have produced blocks - // before fully syncing (this should probably be fixed by connecting sync into BABE) - if errors.Is(res.err.err, errUnknownParent) { - fin, err := s.blockState.GetHighestFinalisedHeader() - if err != nil { - return nil, err - } - - startNumber = fin.Number - } - - return &worker{ - startHash: common.Hash{}, // for bootstrap, just use number - startNumber: uintPtr(startNumber), - targetHash: res.targetHash, - targetNumber: res.targetNumber, - requestData: res.requestData, - direction: res.direction, - }, nil -} - -func (*bootstrapSyncer) hasCurrentWorker(_ *worker, workers map[uint64]*worker) bool { - // we're in bootstrap mode, and there already is a worker, we don't need to dispatch another - return len(workers) != 0 -} - -func (*bootstrapSyncer) handleTick() ([]*worker, error) { - return nil, nil -} diff --git a/dot/sync/bootstrap_syncer_integration_test.go b/dot/sync/bootstrap_syncer_integration_test.go deleted file mode 100644 index 20a9e32d3e..0000000000 --- a/dot/sync/bootstrap_syncer_integration_test.go +++ /dev/null @@ -1,135 +0,0 @@ -//go:build integration - -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "testing" - - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/trie" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" -) - -func newTestBootstrapSyncer(t *testing.T) *bootstrapSyncer { - header := types.NewHeader( - common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 100, types.NewDigest()) - - finHeader := types.NewHeader( - common.NewHash([]byte{0}), trie.EmptyHash, - trie.EmptyHash, 200, 
types.NewDigest()) - - ctrl := gomock.NewController(t) - bs := NewMockBlockState(ctrl) - bs.EXPECT().BestBlockHeader().Return(header, nil).AnyTimes() - bs.EXPECT().GetHighestFinalisedHeader().Return(finHeader, nil).AnyTimes() - - return newBootstrapSyncer(bs) -} - -func TestBootstrapSyncer_handleWork(t *testing.T) { - s := newTestBootstrapSyncer(t) - - // peer's state is equal or lower than ours - // should not create a worker for bootstrap mode - w, err := s.handleNewPeerState(&peerState{ - number: 100, - }) - require.NoError(t, err) - require.Nil(t, w) - - w, err = s.handleNewPeerState(&peerState{ - number: 99, - }) - require.NoError(t, err) - require.Nil(t, w) - - // if peer's number is highest, return worker w/ their block as target - expected := &worker{ - requestData: bootstrapRequestData, - startNumber: uintPtr(101), - targetHash: common.NewHash([]byte{1}), - targetNumber: uintPtr(101), - } - w, err = s.handleNewPeerState(&peerState{ - number: 101, - hash: common.NewHash([]byte{1}), - }) - require.NoError(t, err) - require.Equal(t, expected, w) - - expected = &worker{ - requestData: bootstrapRequestData, - startNumber: uintPtr(101), - targetHash: common.NewHash([]byte{1}), - targetNumber: uintPtr(9999), - } - w, err = s.handleNewPeerState(&peerState{ - number: 9999, - hash: common.NewHash([]byte{1}), - }) - require.NoError(t, err) - require.Equal(t, expected, w) -} - -func TestBootstrapSyncer_handleWorkerResult(t *testing.T) { - s := newTestBootstrapSyncer(t) - - // if the worker error is nil, then this function should do nothing - res := &worker{} - w, err := s.handleWorkerResult(res) - require.NoError(t, err) - require.Nil(t, w) - - // if there was a worker error, this should return a worker with - // startNumber = bestBlockNumber + 1 and the same target as previously - expected := &worker{ - requestData: bootstrapRequestData, - startNumber: uintPtr(101), - targetHash: common.NewHash([]byte{1}), - targetNumber: uintPtr(201), - } - - res = &worker{ - 
requestData: bootstrapRequestData, - targetHash: common.NewHash([]byte{1}), - targetNumber: uintPtr(201), - err: &workerError{}, - } - - w, err = s.handleWorkerResult(res) - require.NoError(t, err) - require.Equal(t, expected, w) -} - -func TestBootstrapSyncer_handleWorkerResult_errUnknownParent(t *testing.T) { - s := newTestBootstrapSyncer(t) - - // if there was a worker error, this should return a worker with - // startNumber = bestBlockNumber + 1 and the same target as previously - expected := &worker{ - requestData: bootstrapRequestData, - startNumber: uintPtr(200), - targetHash: common.NewHash([]byte{1}), - targetNumber: uintPtr(300), - } - - res := &worker{ - requestData: bootstrapRequestData, - targetHash: common.NewHash([]byte{1}), - targetNumber: uintPtr(300), - err: &workerError{ - err: errUnknownParent, - }, - } - - w, err := s.handleWorkerResult(res) - require.NoError(t, err) - require.Equal(t, expected, w) -} diff --git a/dot/sync/bootstrap_syncer_test.go b/dot/sync/bootstrap_syncer_test.go deleted file mode 100644 index 9d59f8dd27..0000000000 --- a/dot/sync/bootstrap_syncer_test.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "errors" - "testing" - - "github.com/ChainSafe/gossamer/dot/types" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" -) - -func Test_bootstrapSyncer_handleWorkerResult(t *testing.T) { - t.Parallel() - mockError := errors.New("mock testing error") - - tests := map[string]struct { - blockStateBuilder func(ctrl *gomock.Controller) BlockState - worker *worker - wantWorkerToRetry *worker - err error - }{ - "nil_worker.err_returns_nil": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - return NewMockBlockState(ctrl) - }, - worker: &worker{}, - }, - "best_block_header_error": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - 
mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockHeader().Return(nil, - mockError) - return mockBlockState - }, - worker: &worker{ - err: &workerError{}, - targetNumber: uintPtr(0), - }, - err: mockError, - }, - "targetNumber_<_bestBlockHeader_number_returns_nil": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{Number: 2}, nil) - return mockBlockState - }, - worker: &worker{ - err: &workerError{}, - targetNumber: uintPtr(0), - }, - }, - "targetNumber_>_bestBlockHeader_number_worker_errUnknownParent,_error_GetHighestFinalisedHeader": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{Number: 2}, nil) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(nil, mockError) - return mockBlockState - }, - worker: &worker{ - err: &workerError{err: errUnknownParent}, - targetNumber: uintPtr(3), - }, - err: mockError, - }, - "targetNumber_>_bestBlockHeader_number_worker_errUnknownParent_returns_worker": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{Number: 2}, nil) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{Number: 1}, nil) - return mockBlockState - }, - worker: &worker{ - err: &workerError{err: errUnknownParent}, - targetNumber: uintPtr(3), - }, - wantWorkerToRetry: &worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(3), - }, - }, - "targetNumber_>_bestBlockHeader_number_returns_worker": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{Number: 2}, nil) - return mockBlockState - }, - worker: &worker{ - 
err: &workerError{}, - targetNumber: uintPtr(3), - }, - wantWorkerToRetry: &worker{ - startNumber: uintPtr(3), - targetNumber: uintPtr(3), - }, - }, - } - for testName, tt := range tests { - tt := tt - t.Run(testName, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - s := &bootstrapSyncer{ - blockState: tt.blockStateBuilder(ctrl), - } - gotWorkerToRetry, err := s.handleWorkerResult(tt.worker) - assert.ErrorIs(t, err, tt.err) - assert.Equal(t, tt.wantWorkerToRetry, gotWorkerToRetry) - }) - } -} diff --git a/dot/sync/chain_processor.go b/dot/sync/chain_processor.go deleted file mode 100644 index c90e9b7159..0000000000 --- a/dot/sync/chain_processor.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "bytes" - "context" - "errors" - "fmt" - - "github.com/ChainSafe/gossamer/dot/telemetry" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/blocktree" -) - -// ChainProcessor processes ready blocks. -// it is implemented by *chainProcessor -type ChainProcessor interface { - processReadyBlocks() - stop() -} - -type chainProcessor struct { - ctx context.Context - cancel context.CancelFunc - - chainSync ChainSync - - // blocks that are ready for processing. ie. their parent is known, or their parent is ahead - // of them within this channel and thus will be processed first - readyBlocks *blockQueue - - // set of block not yet ready to be processed. 
- // blocks are placed here if they fail to be processed due to missing parent block - pendingBlocks DisjointBlockSet - - blockState BlockState - storageState StorageState - transactionState TransactionState - babeVerifier BabeVerifier - finalityGadget FinalityGadget - blockImportHandler BlockImportHandler - telemetry Telemetry -} - -type chainProcessorConfig struct { - readyBlocks *blockQueue - pendingBlocks DisjointBlockSet - syncer ChainSync - blockState BlockState - storageState StorageState - transactionState TransactionState - babeVerifier BabeVerifier - finalityGadget FinalityGadget - blockImportHandler BlockImportHandler - telemetry Telemetry - badBlocks []string -} - -func newChainProcessor(cfg chainProcessorConfig) *chainProcessor { - ctx, cancel := context.WithCancel(context.Background()) - - return &chainProcessor{ - ctx: ctx, - cancel: cancel, - readyBlocks: cfg.readyBlocks, - pendingBlocks: cfg.pendingBlocks, - chainSync: cfg.syncer, - blockState: cfg.blockState, - storageState: cfg.storageState, - transactionState: cfg.transactionState, - babeVerifier: cfg.babeVerifier, - finalityGadget: cfg.finalityGadget, - blockImportHandler: cfg.blockImportHandler, - telemetry: cfg.telemetry, - } -} - -func (s *chainProcessor) stop() { - s.cancel() -} - -func (s *chainProcessor) processReadyBlocks() { - for { - bd, err := s.readyBlocks.pop(s.ctx) - if err != nil { - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return - } - panic(fmt.Sprintf("unhandled error: %s", err)) - } - - if err := s.processBlockData(*bd); err != nil { - // depending on the error, we might want to save this block for later - if !errors.Is(err, errFailedToGetParent) && !errors.Is(err, blocktree.ErrParentNotFound) { - logger.Errorf("block data processing for block with hash %s failed: %s", bd.Hash, err) - continue - } - - logger.Tracef("block data processing for block with hash %s failed: %s", bd.Hash, err) - if err := 
s.pendingBlocks.addBlock(&types.Block{ - Header: *bd.Header, - Body: *bd.Body, - }); err != nil { - logger.Debugf("failed to re-add block to pending blocks: %s", err) - } - } - } -} - -// processBlockData processes the BlockData from a BlockResponse and -// returns the index of the last BlockData it handled on success, -// or the index of the block data that errored on failure. -func (c *chainProcessor) processBlockData(blockData types.BlockData) error { - logger.Debugf("processing block data with hash %s", blockData.Hash) - - headerInState, err := c.blockState.HasHeader(blockData.Hash) - if err != nil { - return fmt.Errorf("checking if block state has header: %w", err) - } - - bodyInState, err := c.blockState.HasBlockBody(blockData.Hash) - if err != nil { - return fmt.Errorf("checking if block state has body: %w", err) - } - - // while in bootstrap mode we don't need to broadcast block announcements - announceImportedBlock := c.chainSync.syncState() == tip - if headerInState && bodyInState { - err = c.processBlockDataWithStateHeaderAndBody(blockData, announceImportedBlock) - if err != nil { - return fmt.Errorf("processing block data with header and "+ - "body in block state: %w", err) - } - return nil - } - - if blockData.Header != nil { - if blockData.Body != nil { - err = c.processBlockDataWithHeaderAndBody(blockData, announceImportedBlock) - if err != nil { - return fmt.Errorf("processing block data with header and body: %w", err) - } - logger.Debugf("block with hash %s processed", blockData.Hash) - } - - if blockData.Justification != nil && len(*blockData.Justification) > 0 { - err = c.handleJustification(blockData.Header, *blockData.Justification) - if err != nil { - return fmt.Errorf("handling justification: %w", err) - } - } - } - - err = c.blockState.CompareAndSetBlockData(&blockData) - if err != nil { - return fmt.Errorf("comparing and setting block data: %w", err) - } - - return nil -} - -func (c *chainProcessor) 
processBlockDataWithStateHeaderAndBody(blockData types.BlockData, - announceImportedBlock bool) (err error) { - // TODO: fix this; sometimes when the node shuts down the "best block" isn't stored properly, - // so when the node restarts it has blocks higher than what it thinks is the best, causing it not to sync - // if we update the node to only store finalised blocks in the database, this should be fixed and the entire - // code block can be removed (#1784) - block, err := c.blockState.GetBlockByHash(blockData.Hash) - if err != nil { - return fmt.Errorf("getting block by hash: %w", err) - } - - err = c.blockState.AddBlockToBlockTree(block) - if errors.Is(err, blocktree.ErrBlockExists) { - logger.Debugf( - "block number %d with hash %s already exists in block tree, skipping it.", - block.Header.Number, blockData.Hash) - return nil - } else if err != nil { - return fmt.Errorf("adding block to blocktree: %w", err) - } - - if blockData.Justification != nil && len(*blockData.Justification) > 0 { - err = c.handleJustification(&block.Header, *blockData.Justification) - if err != nil { - return fmt.Errorf("handling justification: %w", err) - } - } - - // TODO: this is probably unnecessary, since the state is already in the database - // however, this case shouldn't be hit often, since it's only hit if the node state - // is rewinded or if the node shuts down unexpectedly (#1784) - state, err := c.storageState.TrieState(&block.Header.StateRoot) - if err != nil { - return fmt.Errorf("loading trie state: %w", err) - } - - err = c.blockImportHandler.HandleBlockImport(block, state, announceImportedBlock) - if err != nil { - return fmt.Errorf("handling block import: %w", err) - } - - return nil -} - -func (c *chainProcessor) processBlockDataWithHeaderAndBody(blockData types.BlockData, - announceImportedBlock bool) (err error) { - err = c.babeVerifier.VerifyBlock(blockData.Header) - if err != nil { - return fmt.Errorf("babe verifying block: %w", err) - } - - 
c.handleBody(blockData.Body) - - block := &types.Block{ - Header: *blockData.Header, - Body: *blockData.Body, - } - - err = c.handleBlock(block, announceImportedBlock) - if err != nil { - return fmt.Errorf("handling block: %w", err) - } - - return nil -} - -// handleHeader handles block bodies included in BlockResponses -func (s *chainProcessor) handleBody(body *types.Body) { - for _, ext := range *body { - s.transactionState.RemoveExtrinsic(ext) - } -} - -// handleHeader handles blocks (header+body) included in BlockResponses -func (s *chainProcessor) handleBlock(block *types.Block, announceImportedBlock bool) error { - parent, err := s.blockState.GetHeader(block.Header.ParentHash) - if err != nil { - return fmt.Errorf("%w: %s", errFailedToGetParent, err) - } - - s.storageState.Lock() - defer s.storageState.Unlock() - - ts, err := s.storageState.TrieState(&parent.StateRoot) - if err != nil { - return err - } - - root := ts.MustRoot() - if !bytes.Equal(parent.StateRoot[:], root[:]) { - panic("parent state root does not match snapshot state root") - } - - rt, err := s.blockState.GetRuntime(parent.Hash()) - if err != nil { - return err - } - - rt.SetContextStorage(ts) - - _, err = rt.ExecuteBlock(block) - if err != nil { - return fmt.Errorf("failed to execute block %d: %w", block.Header.Number, err) - } - - if err = s.blockImportHandler.HandleBlockImport(block, ts, announceImportedBlock); err != nil { - return err - } - - logger.Debugf("🔗 imported block number %d with hash %s", block.Header.Number, block.Header.Hash()) - - blockHash := block.Header.Hash() - s.telemetry.SendMessage(telemetry.NewBlockImport( - &blockHash, - block.Header.Number, - "NetworkInitialSync")) - - return nil -} - -func (s *chainProcessor) handleJustification(header *types.Header, justification []byte) (err error) { - logger.Debugf("handling justification for block %d...", header.Number) - - headerHash := header.Hash() - err = s.finalityGadget.VerifyBlockJustification(headerHash, justification) 
- if err != nil { - return fmt.Errorf("verifying block number %d justification: %w", header.Number, err) - } - - err = s.blockState.SetJustification(headerHash, justification) - if err != nil { - return fmt.Errorf("setting justification for block number %d: %w", header.Number, err) - } - - logger.Infof("🔨 finalised block number %d with hash %s", header.Number, headerHash) - return nil -} diff --git a/dot/sync/chain_processor_integration_test.go b/dot/sync/chain_processor_integration_test.go deleted file mode 100644 index 7164cd75d2..0000000000 --- a/dot/sync/chain_processor_integration_test.go +++ /dev/null @@ -1,348 +0,0 @@ -//go:build integration - -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "errors" - "testing" - "time" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/state" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/babe/inherents" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/common/variadic" - runtime "github.com/ChainSafe/gossamer/lib/runtime" - "github.com/ChainSafe/gossamer/lib/transaction" - "github.com/ChainSafe/gossamer/pkg/scale" - - "github.com/stretchr/testify/require" -) - -func buildBlockWithSlotAndTimestamp(t *testing.T, instance runtime.Instance, - parent *types.Header, currentSlot, timestamp uint64) *types.Block { - t.Helper() - - digest := types.NewDigest() - prd, err := types.NewBabeSecondaryPlainPreDigest(0, currentSlot).ToPreRuntimeDigest() - require.NoError(t, err) - err = digest.Add(*prd) - require.NoError(t, err) - header := &types.Header{ - ParentHash: parent.Hash(), - StateRoot: common.Hash{}, - ExtrinsicsRoot: common.Hash{}, - Number: parent.Number + 1, - Digest: digest, - } - - err = 
instance.InitializeBlock(header) - require.NoError(t, err) - - inherentData := types.NewInherentData() - err = inherentData.SetInherent(types.Timstap0, timestamp) - require.NoError(t, err) - - err = inherentData.SetInherent(types.Babeslot, currentSlot) - require.NoError(t, err) - - parachainInherent := inherents.ParachainInherentData{ - ParentHeader: *parent, - } - - err = inherentData.SetInherent(types.Parachn0, parachainInherent) - require.NoError(t, err) - - err = inherentData.SetInherent(types.Newheads, []byte{0}) - require.NoError(t, err) - - encodedInherentData, err := inherentData.Encode() - require.NoError(t, err) - - // Call BlockBuilder_inherent_extrinsics which returns the inherents as encoded extrinsics - encodedInherentExtrinsics, err := instance.InherentExtrinsics(encodedInherentData) - require.NoError(t, err) - - var inherentExtrinsics [][]byte - err = scale.Unmarshal(encodedInherentExtrinsics, &inherentExtrinsics) - require.NoError(t, err) - - for _, inherent := range inherentExtrinsics { - encodedInherent, err := scale.Marshal(inherent) - require.NoError(t, err) - - applyExtrinsicResult, err := instance.ApplyExtrinsic(encodedInherent) - require.NoError(t, err) - require.Equal(t, applyExtrinsicResult, []byte{0, 0}) - } - - finalisedHeader, err := instance.FinalizeBlock() - require.NoError(t, err) - - body := types.Body(types.BytesArrayToExtrinsics(inherentExtrinsics)) - - finalisedHeader.Number = header.Number - finalisedHeader.Hash() - - return &types.Block{ - Header: *finalisedHeader, - Body: body, - } -} - -func buildAndAddBlocksToState(t *testing.T, - runtime runtime.Instance, blockState *state.BlockState, amount uint) { - - t.Helper() - - parent, err := blockState.BestBlockHeader() - require.NoError(t, err) - - babeConfig, err := runtime.BabeConfiguration() - require.NoError(t, err) - - timestamp := uint64(time.Now().Unix()) - slotDuration := babeConfig.SlotDuration - - for i := uint(0); i < amount; i++ { - // calculate the exact slot for each 
produced block - currentSlot := timestamp / slotDuration - - block := buildBlockWithSlotAndTimestamp(t, runtime, parent, currentSlot, timestamp) - err = blockState.AddBlock(block) - require.NoError(t, err) - parent = &block.Header - - // increase the timestamp by the slot duration - // so we will get a different slot for the next block - timestamp += slotDuration - } - -} - -func TestChainProcessor_HandleBlockResponse_ValidChain(t *testing.T) { - syncer := newTestSyncer(t) - responder := newTestSyncer(t) - - bestBlockHash := responder.blockState.(*state.BlockState).BestBlockHash() - runtimeInstance, err := responder.blockState.GetRuntime(bestBlockHash) - require.NoError(t, err) - - buildAndAddBlocksToState(t, runtimeInstance, - responder.blockState.(*state.BlockState), maxResponseSize*2) - - // syncer makes request for chain - startNum := 1 - start, err := variadic.NewUint32OrHash(startNum) - require.NoError(t, err) - - req := &network.BlockRequestMessage{ - RequestedData: network.RequestedDataHeader + network.RequestedDataBody, - StartingBlock: *start, - } - - // get response - resp, err := responder.CreateBlockResponse(req) - require.NoError(t, err) - - // process response - for _, bd := range resp.BlockData { - err = syncer.chainProcessor.(*chainProcessor).processBlockData(*bd) - require.NoError(t, err) - } - - // syncer makes request for chain again (block 129+) - startNum = 129 - start, err = variadic.NewUint32OrHash(startNum) - require.NoError(t, err) - - req = &network.BlockRequestMessage{ - RequestedData: network.RequestedDataHeader + network.RequestedDataBody, - StartingBlock: *start, - } - - // get response - resp, err = responder.CreateBlockResponse(req) - require.NoError(t, err) - - // process response - for _, bd := range resp.BlockData { - err = syncer.chainProcessor.(*chainProcessor).processBlockData(*bd) - require.NoError(t, err) - } -} - -func TestChainProcessor_HandleBlockResponse_MissingBlocks(t *testing.T) { - syncer := newTestSyncer(t) - - 
bestBlockHash := syncer.blockState.(*state.BlockState).BestBlockHash() - syncerRuntime, err := syncer.blockState.GetRuntime(bestBlockHash) - require.NoError(t, err) - - const syncerAmountOfBlocks = 4 - buildAndAddBlocksToState(t, syncerRuntime, syncer.blockState.(*state.BlockState), syncerAmountOfBlocks) - - responder := newTestSyncer(t) - responderRuntime, err := responder.blockState.GetRuntime(bestBlockHash) - require.NoError(t, err) - - const responderAmountOfBlocks = 16 - buildAndAddBlocksToState(t, responderRuntime, responder.blockState.(*state.BlockState), responderAmountOfBlocks) - - startNum := 15 - start, err := variadic.NewUint32OrHash(startNum) - require.NoError(t, err) - - req := &network.BlockRequestMessage{ - RequestedData: 3, - StartingBlock: *start, - } - - // resp contains blocks 15 to 15 + maxResponseSize) - resp, err := responder.CreateBlockResponse(req) - require.NoError(t, err) - - for _, bd := range resp.BlockData { - err = syncer.chainProcessor.(*chainProcessor).processBlockData(*bd) - require.True(t, errors.Is(err, errFailedToGetParent)) - } -} - -func TestChainProcessor_handleBody_ShouldRemoveIncludedExtrinsics(t *testing.T) { - syncer := newTestSyncer(t) - - ext := []byte("nootwashere") - tx := &transaction.ValidTransaction{ - Extrinsic: ext, - Validity: &transaction.Validity{Priority: 1}, - } - - _, err := syncer.chainProcessor.(*chainProcessor).transactionState.(*state.TransactionState).Push(tx) - require.NoError(t, err) - - body := types.NewBody([]types.Extrinsic{ext}) - syncer.chainProcessor.(*chainProcessor).handleBody(body) - - inQueue := syncer.chainProcessor.(*chainProcessor).transactionState.(*state.TransactionState).Pop() - require.Nil(t, inQueue, "queue should be empty") -} - -func TestChainProcessor_HandleBlockResponse_BlockData(t *testing.T) { - syncer := newTestSyncer(t) - - parent, err := syncer.blockState.(*state.BlockState).BestBlockHeader() - require.NoError(t, err) - - runtimeInstance, err := 
syncer.blockState.GetRuntime(parent.Hash()) - require.NoError(t, err) - - babeConfig, err := runtimeInstance.BabeConfiguration() - require.NoError(t, err) - - timestamp := uint64(time.Now().Unix()) - slotDuration := babeConfig.SlotDuration - - // calculate the exact slot for each produced block - currentSlot := timestamp / slotDuration - block := buildBlockWithSlotAndTimestamp(t, runtimeInstance, parent, currentSlot, timestamp) - - bd := []*types.BlockData{{ - Hash: block.Header.Hash(), - Header: &block.Header, - Body: &block.Body, - Receipt: nil, - MessageQueue: nil, - Justification: nil, - }} - msg := &network.BlockResponseMessage{ - BlockData: bd, - } - - for _, bd := range msg.BlockData { - err = syncer.chainProcessor.(*chainProcessor).processBlockData(*bd) - require.NoError(t, err) - } -} - -func TestChainProcessor_ExecuteBlock(t *testing.T) { - syncer := newTestSyncer(t) - - parent, err := syncer.blockState.(*state.BlockState).BestBlockHeader() - require.NoError(t, err) - - bestBlockHash := syncer.blockState.(*state.BlockState).BestBlockHash() - runtimeInstance, err := syncer.blockState.GetRuntime(bestBlockHash) - require.NoError(t, err) - - babeConfig, err := runtimeInstance.BabeConfiguration() - require.NoError(t, err) - - timestamp := uint64(time.Now().Unix()) - slotDuration := babeConfig.SlotDuration - - // calculate the exact slot for each produced block - currentSlot := timestamp / slotDuration - block := buildBlockWithSlotAndTimestamp(t, runtimeInstance, parent, currentSlot, timestamp) - - // reset parentState - parentState, err := syncer.chainProcessor.(*chainProcessor).storageState.TrieState(&parent.StateRoot) - require.NoError(t, err) - runtimeInstance.SetContextStorage(parentState) - - _, err = runtimeInstance.ExecuteBlock(block) - require.NoError(t, err) -} - -func TestChainProcessor_HandleJustification(t *testing.T) { - syncer := newTestSyncer(t) - - d, err := types.NewBabeSecondaryPlainPreDigest(0, 1).ToPreRuntimeDigest() - require.NoError(t, 
err) - digest := types.NewDigest() - err = digest.Add(*d) - require.NoError(t, err) - - header := &types.Header{ - ParentHash: syncer.blockState.(*state.BlockState).GenesisHash(), - Number: 1, - Digest: digest, - } - - just := []byte("testjustification") - - err = syncer.blockState.(*state.BlockState).AddBlock(&types.Block{ - Header: *header, - Body: types.Body{}, - }) - require.NoError(t, err) - - err = syncer.chainProcessor.(*chainProcessor).handleJustification(header, just) - require.NoError(t, err) - - res, err := syncer.blockState.GetJustification(header.Hash()) - require.NoError(t, err) - require.Equal(t, just, res) -} - -func TestChainProcessor_processReadyBlocks_errFailedToGetParent(t *testing.T) { - syncer := newTestSyncer(t) - processor := syncer.chainProcessor.(*chainProcessor) - go processor.processReadyBlocks() - defer processor.cancel() - - header := &types.Header{ - Number: 1, - } - - processor.readyBlocks.push(&types.BlockData{ - Header: header, - Body: &types.Body{}, - }) - - time.Sleep(time.Millisecond * 100) - require.True(t, processor.pendingBlocks.(*disjointBlockSet).hasBlock(header.Hash())) -} diff --git a/dot/sync/chain_processor_test.go b/dot/sync/chain_processor_test.go deleted file mode 100644 index 8e794767ab..0000000000 --- a/dot/sync/chain_processor_test.go +++ /dev/null @@ -1,1181 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "context" - "errors" - "testing" - - "github.com/ChainSafe/gossamer/dot/telemetry" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/blocktree" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/runtime/storage" - "github.com/ChainSafe/gossamer/lib/trie" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" -) - -func 
Test_chainProcessor_handleBlock(t *testing.T) { - t.Parallel() - mockError := errors.New("test mock error") - testHash := common.MustHexToHash("0x03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314") - testParentHash := common.MustHexToHash("0x7db9db5ed9967b80143100189ba69d9e4deab85ac3570e5df25686cabe32964a") - - tests := map[string]struct { - chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor - block *types.Block - announce bool - wantErr error - }{ - "handle_getHeader_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(nil, mockError) - chainProcessor.blockState = mockBlockState - return - }, - block: &types.Block{ - Body: types.Body{}, - }, - wantErr: errFailedToGetParent, - }, - "handle_trieState_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{}, nil) - chainProcessor.blockState = mockBlockState - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().TrieState(&common.Hash{}).Return(nil, mockError) - mockStorageState.EXPECT().Unlock() - chainProcessor.storageState = mockStorageState - return - }, - block: &types.Block{ - Body: types.Body{}, - }, - wantErr: mockError, - }, - "handle_getRuntime_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ - StateRoot: testHash, - }, nil) - mockBlockState.EXPECT().GetRuntime(testParentHash).Return(nil, mockError) - chainProcessor.blockState = mockBlockState - trieState := storage.NewTrieState(nil) - mockStorageState := NewMockStorageState(ctrl) - 
mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().TrieState(&testHash).Return(trieState, nil) - mockStorageState.EXPECT().Unlock() - chainProcessor.storageState = mockStorageState - return - }, - block: &types.Block{ - Body: types.Body{}, - }, - wantErr: mockError, - }, - "handle_runtime_ExecuteBlock_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - trieState := storage.NewTrieState(nil) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ - StateRoot: testHash, - }, nil) - mockInstance := NewMockInstance(ctrl) - mockInstance.EXPECT().SetContextStorage(trieState) - mockInstance.EXPECT().ExecuteBlock(&types.Block{Body: types.Body{}}).Return(nil, mockError) - mockBlockState.EXPECT().GetRuntime(testParentHash).Return(mockInstance, nil) - chainProcessor.blockState = mockBlockState - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().TrieState(&testHash).Return(trieState, nil) - mockStorageState.EXPECT().Unlock() - chainProcessor.storageState = mockStorageState - return - }, - block: &types.Block{ - Body: types.Body{}, - }, - wantErr: mockError, - }, - "handle_block_import_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - trieState := storage.NewTrieState(nil) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ - StateRoot: testHash, - }, nil) - mockBlock := &types.Block{Body: types.Body{}} - mockInstance := NewMockInstance(ctrl) - mockInstance.EXPECT().SetContextStorage(trieState) - mockInstance.EXPECT().ExecuteBlock(mockBlock).Return(nil, nil) - mockBlockState.EXPECT().GetRuntime(testParentHash).Return(mockInstance, nil) - chainProcessor.blockState = mockBlockState - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - 
mockStorageState.EXPECT().TrieState(&testHash).Return(trieState, nil) - mockStorageState.EXPECT().Unlock() - chainProcessor.storageState = mockStorageState - mockBlockImportHandler := NewMockBlockImportHandler(ctrl) - mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, - trieState, false).Return(mockError) - chainProcessor.blockImportHandler = mockBlockImportHandler - return - }, - block: &types.Block{ - Body: types.Body{}, - }, - wantErr: mockError, - }, - "base_case": { - chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - mockBlock := &types.Block{ - Body: types.Body{}, // empty slice of extrinsics - } - trieState := storage.NewTrieState(nil) - mockBlockState := NewMockBlockState(ctrl) - mockHeader := &types.Header{ - Number: 0, - StateRoot: trie.EmptyHash, - } - mockHeaderHash := mockHeader.Hash() - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(mockHeader, nil) - - mockInstance := NewMockInstance(ctrl) - mockInstance.EXPECT().SetContextStorage(trieState) - mockInstance.EXPECT().ExecuteBlock(mockBlock).Return(nil, nil) - mockBlockState.EXPECT().GetRuntime(mockHeaderHash).Return(mockInstance, nil) - chainProcessor.blockState = mockBlockState - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().Unlock() - mockStorageState.EXPECT().TrieState(&trie.EmptyHash).Return(trieState, nil) - chainProcessor.storageState = mockStorageState - mockBlockImportHandler := NewMockBlockImportHandler(ctrl) - mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, trieState, false).Return(nil) - chainProcessor.blockImportHandler = mockBlockImportHandler - mockTelemetry := NewMockTelemetry(ctrl) - mockTelemetry.EXPECT().SendMessage(gomock.Any()) - chainProcessor.telemetry = mockTelemetry - return - }, - block: &types.Block{ - Header: types.Header{ - Number: 0, - }, - Body: types.Body{}, - }, - }, - "import_block_and_announce": { - announce: true, - 
chainProcessorBuilder: func(ctrl *gomock.Controller) (chainProcessor chainProcessor) { - mockBlock := &types.Block{ - Body: types.Body{}, // empty slice of extrinsics - } - trieState := storage.NewTrieState(nil) - mockBlockState := NewMockBlockState(ctrl) - mockHeader := &types.Header{ - Number: 0, - StateRoot: trie.EmptyHash, - } - mockHeaderHash := mockHeader.Hash() - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(mockHeader, nil) - - mockInstance := NewMockInstance(ctrl) - mockInstance.EXPECT().SetContextStorage(trieState) - mockInstance.EXPECT().ExecuteBlock(mockBlock).Return(nil, nil) - mockBlockState.EXPECT().GetRuntime(mockHeaderHash).Return(mockInstance, nil) - chainProcessor.blockState = mockBlockState - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().Unlock() - mockStorageState.EXPECT().TrieState(&trie.EmptyHash).Return(trieState, nil) - chainProcessor.storageState = mockStorageState - mockBlockImportHandler := NewMockBlockImportHandler(ctrl) - mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, trieState, true).Return(nil) - chainProcessor.blockImportHandler = mockBlockImportHandler - mockTelemetry := NewMockTelemetry(ctrl) - mockTelemetry.EXPECT().SendMessage(gomock.Any()) - chainProcessor.telemetry = mockTelemetry - return - }, - block: &types.Block{ - Header: types.Header{ - Number: 0, - }, - Body: types.Body{}, - }, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - s := tt.chainProcessorBuilder(ctrl) - - err := s.handleBlock(tt.block, tt.announce) - assert.ErrorIs(t, err, tt.wantErr) - }) - } - t.Run("panics_on_different_parent_state_root", func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - bock := &types.Block{ - Header: types.Header{ - ParentHash: common.Hash{1}, - }, - } - blockState := NewMockBlockState(ctrl) - blockState.EXPECT().GetHeader(common.Hash{1}). 
- Return(&types.Header{StateRoot: common.Hash{2}}, nil) - trieState := storage.NewTrieState(nil) - storageState := NewMockStorageState(ctrl) - lockCall := storageState.EXPECT().Lock() - trieStateCall := storageState.EXPECT().TrieState(&common.Hash{2}). - Return(trieState, nil).After(lockCall) - storageState.EXPECT().Unlock().After(trieStateCall) - chainProcessor := &chainProcessor{ - blockState: blockState, - storageState: storageState, - } - const expectedPanicValue = "parent state root does not match snapshot state root" - assert.PanicsWithValue(t, expectedPanicValue, func() { - _ = chainProcessor.handleBlock(bock, false) - }) - }) -} - -func Test_chainProcessor_handleBody(t *testing.T) { - t.Parallel() - - testExtrinsics := []types.Extrinsic{{1, 2, 3}, {7, 8, 9, 0}, {0xa, 0xb}} - testBody := types.NewBody(testExtrinsics) - - t.Run("base_case", func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - mockTransactionState := NewMockTransactionState(ctrl) - mockTransactionState.EXPECT().RemoveExtrinsic(testExtrinsics[0]) - mockTransactionState.EXPECT().RemoveExtrinsic(testExtrinsics[1]) - mockTransactionState.EXPECT().RemoveExtrinsic(testExtrinsics[2]) - processor := chainProcessor{ - transactionState: mockTransactionState, - } - processor.handleBody(testBody) - }) -} - -func Test_chainProcessor_handleJustification(t *testing.T) { - t.Parallel() - - header := &types.Header{ - Number: 2, - } - headerHash := header.Hash() - errTest := errors.New("test error") - - type args struct { - header *types.Header - justification []byte - } - tests := map[string]struct { - chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor - args args - sentinelError error - errorMessage string - }{ - "invalid_justification": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockFinalityGadget := NewMockFinalityGadget(ctrl) - mockFinalityGadget.EXPECT().VerifyBlockJustification(headerHash, - []byte(`x`)).Return(errTest) - return 
chainProcessor{ - finalityGadget: mockFinalityGadget, - } - }, - args: args{ - header: header, - justification: []byte(`x`), - }, - sentinelError: errTest, - errorMessage: "verifying block number 2 justification: test error", - }, - "set_justification_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().SetJustification(headerHash, []byte(`xx`)).Return(errTest) - mockFinalityGadget := NewMockFinalityGadget(ctrl) - mockFinalityGadget.EXPECT().VerifyBlockJustification(headerHash, []byte(`xx`)).Return(nil) - return chainProcessor{ - blockState: mockBlockState, - finalityGadget: mockFinalityGadget, - } - }, - args: args{ - header: header, - justification: []byte(`xx`), - }, - sentinelError: errTest, - errorMessage: "setting justification for block number 2: test error", - }, - "base_case_set": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().SetJustification(headerHash, []byte(`1234`)).Return(nil) - mockFinalityGadget := NewMockFinalityGadget(ctrl) - mockFinalityGadget.EXPECT().VerifyBlockJustification(headerHash, []byte(`1234`)).Return(nil) - return chainProcessor{ - blockState: mockBlockState, - finalityGadget: mockFinalityGadget, - } - }, - args: args{ - header: header, - justification: []byte(`1234`), - }, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - processor := tt.chainProcessorBuilder(ctrl) - - err := processor.handleJustification(tt.args.header, tt.args.justification) - - assert.ErrorIs(t, err, tt.sentinelError) - if tt.sentinelError != nil { - assert.EqualError(t, err, tt.errorMessage) - } - }) - } -} - -func Test_chainProcessor_processBlockData(t *testing.T) { - t.Parallel() - - mockError := errors.New("mock test error") - - tests := map[string]struct { - 
chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor - blockData types.BlockData - expectedError error - }{ - "handle_has_header_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, mockError) - - return chainProcessor{ - blockState: mockBlockState, - } - }, - blockData: types.BlockData{}, - expectedError: mockError, - }, - "handle_has_block_body_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, mockError) - return chainProcessor{ - blockState: mockBlockState, - } - }, - blockData: types.BlockData{}, - expectedError: mockError, - }, - "handle_getBlockByHash_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(true, nil) - mockBlockState.EXPECT().GetBlockByHash(common.Hash{}).Return(nil, mockError) - - mockChainSync := NewMockChainSync(ctrl) - mockChainSync.EXPECT().syncState().Return(bootstrap) - return chainProcessor{ - blockState: mockBlockState, - chainSync: mockChainSync, - } - }, - blockData: types.BlockData{}, - expectedError: mockError, - }, - "handle_block_data_justification_!=_nil": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlock := &types.Block{ - Header: types.Header{ - Number: uint(1), - }, - } - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(true, nil) - mockBlockState.EXPECT().GetBlockByHash(common.Hash{}).Return(mockBlock, nil) - 
mockBlockState.EXPECT().AddBlockToBlockTree(&types.Block{ - Header: types.Header{Number: 1}}).Return(nil) - mockBlockState.EXPECT().SetJustification(common.MustHexToHash( - "0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"), []byte{1, 2, 3}) - mockFinalityGadget := NewMockFinalityGadget(ctrl) - mockFinalityGadget.EXPECT().VerifyBlockJustification(common.MustHexToHash( - "0x6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3"), []byte{1, 2, - 3}).Return(nil) - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().TrieState(&common.Hash{}).Return(nil, nil) - - // given our current chain sync state is `tip` - // the `HandleBlockImport` method should expect - // true as the announce parameter - mockChainSync := NewMockChainSync(ctrl) - mockChainSync.EXPECT().syncState().Return(tip) - - mockBlockImportHandler := NewMockBlockImportHandler(ctrl) - mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, - nil, true).Return(nil) - - return chainProcessor{ - chainSync: mockChainSync, - blockState: mockBlockState, - finalityGadget: mockFinalityGadget, - storageState: mockStorageState, - blockImportHandler: mockBlockImportHandler, - } - }, - blockData: types.BlockData{ - Justification: &[]byte{1, 2, 3}, - }, - }, - "handle_babe_verify_block_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(mockError) - - mockChainSync := NewMockChainSync(ctrl) - mockChainSync.EXPECT().syncState().Return(bootstrap) - - return chainProcessor{ - chainSync: mockChainSync, - blockState: mockBlockState, - babeVerifier: mockBabeVerifier, - } - }, - blockData: types.BlockData{ - Header: &types.Header{}, - Body: 
&types.Body{}, - }, - expectedError: mockError, - }, - "no_header_and_body_-_fail_to_handle_justification": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - blockState.EXPECT().HasHeader(common.Hash{1}).Return(false, nil) - blockState.EXPECT().HasBlockBody(common.Hash{1}).Return(true, nil) - - finalityGadget := NewMockFinalityGadget(ctrl) - expectedBlockDataHeader := &types.Header{Number: 2} - expectedBlockDataHeaderHash := expectedBlockDataHeader.Hash() - finalityGadget.EXPECT(). - VerifyBlockJustification(expectedBlockDataHeaderHash, []byte{1, 2, 3}). - Return(mockError) - - mockChainSync := NewMockChainSync(ctrl) - mockChainSync.EXPECT().syncState().Return(bootstrap) - - return chainProcessor{ - chainSync: mockChainSync, - blockState: blockState, - finalityGadget: finalityGadget, - } - }, - blockData: types.BlockData{ - Hash: common.Hash{1}, - Header: &types.Header{Number: 2}, - Justification: &[]byte{1, 2, 3}, - }, - expectedError: mockError, - }, - "handle_compareAndSetBlockData_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().CompareAndSetBlockData(&types.BlockData{}).Return(mockError) - - mockChainSync := NewMockChainSync(ctrl) - mockChainSync.EXPECT().syncState().Return(bootstrap) - return chainProcessor{ - chainSync: mockChainSync, - blockState: mockBlockState, - } - }, - blockData: types.BlockData{}, - expectedError: mockError, - }, - "success_with_justification": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - stateRootHash := common.MustHexToHash("0x03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314") - runtimeHash := 
common.MustHexToHash("0x7db9db5ed9967b80143100189ba69d9e4deab85ac3570e5df25686cabe32964a") - mockTrieState := storage.NewTrieState(nil) - mockBlock := &types.Block{Header: types.Header{}, Body: types.Body{}} - - mockInstance := NewMockInstance(ctrl) - mockInstance.EXPECT().SetContextStorage(mockTrieState) - mockInstance.EXPECT().ExecuteBlock(mockBlock).Return(nil, nil) - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{ - Number: 0, - StateRoot: stateRootHash, - }, nil) - mockBlockState.EXPECT().SetJustification( - common.MustHexToHash("0xdcdd89927d8a348e00257e1ecc8617f45edb5118efff3ea2f9961b2ad9b7690a"), []byte{1, 2, 3}) - mockBlockState.EXPECT().CompareAndSetBlockData(gomock.AssignableToTypeOf(&types.BlockData{})) - mockBlockState.EXPECT().GetRuntime(runtimeHash).Return(mockInstance, nil) - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}) - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().TrieState(&stateRootHash).Return(mockTrieState, nil) - mockStorageState.EXPECT().Unlock() - - mockChainSync := NewMockChainSync(ctrl) - mockChainSync.EXPECT().syncState().Return(bootstrap) - - mockBlockImportHandler := NewMockBlockImportHandler(ctrl) - mockBlockImportHandler.EXPECT().HandleBlockImport(mockBlock, mockTrieState, false) - - mockTelemetry := NewMockTelemetry(ctrl) - mockTelemetry.EXPECT().SendMessage(gomock.Any()) - mockFinalityGadget := NewMockFinalityGadget(ctrl) - mockFinalityGadget.EXPECT().VerifyBlockJustification( - common.MustHexToHash("0xdcdd89927d8a348e00257e1ecc8617f45edb5118efff3ea2f9961b2ad9b7690a"), - []byte{1, 2, 3}).Return(nil) - return chainProcessor{ - chainSync: mockChainSync, - blockState: mockBlockState, - babeVerifier: 
mockBabeVerifier, - storageState: mockStorageState, - blockImportHandler: mockBlockImportHandler, - telemetry: mockTelemetry, - finalityGadget: mockFinalityGadget, - } - }, - blockData: types.BlockData{ - Header: &types.Header{ - Number: 0, - }, - Body: &types.Body{}, - Justification: &[]byte{1, 2, 3}, - }, - }, - } - - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - processor := tt.chainProcessorBuilder(ctrl) - err := processor.processBlockData(tt.blockData) - assert.ErrorIs(t, err, tt.expectedError) - }) - } -} - -func Test_chainProcessor_processBlockDataWithStateHeaderAndBody(t *testing.T) { - t.Parallel() - - errTest := errors.New("test error") - - testCases := map[string]struct { - chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor - blockData types.BlockData - announceImportedBlock bool - sentinelError error - errorMessage string - }{ - "get_block_by_hash_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - blockState.EXPECT().GetBlockByHash(common.Hash{1}). 
- Return(nil, errTest) - return chainProcessor{ - blockState: blockState, - } - }, - blockData: types.BlockData{Hash: common.Hash{1}}, - sentinelError: errTest, - errorMessage: "getting block by hash: test error", - }, - "block_already_exists_in_blocktree": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - block := &types.Block{Header: types.Header{Number: 2}} - blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) - blockState.EXPECT().AddBlockToBlockTree(block).Return(blocktree.ErrBlockExists) - return chainProcessor{ - blockState: blockState, - } - }, - blockData: types.BlockData{Hash: common.Hash{1}}, - }, - "add_block_to_blocktree_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - block := &types.Block{Header: types.Header{Number: 2}} - blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) - blockState.EXPECT().AddBlockToBlockTree(block).Return(errTest) - return chainProcessor{ - blockState: blockState, - } - }, - blockData: types.BlockData{Hash: common.Hash{1}}, - sentinelError: errTest, - errorMessage: "adding block to blocktree: test error", - }, - "handle_justification_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - blockHeader := types.Header{Number: 2} - blockHeaderHash := blockHeader.Hash() - block := &types.Block{Header: blockHeader} - blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) - blockState.EXPECT().AddBlockToBlockTree(block).Return(nil) - - finalityGadget := NewMockFinalityGadget(ctrl) - finalityGadget.EXPECT(). - VerifyBlockJustification(blockHeaderHash, []byte{3}). 
- Return(errTest) - - return chainProcessor{ - blockState: blockState, - finalityGadget: finalityGadget, - } - }, - blockData: types.BlockData{ - Hash: common.Hash{1}, - Justification: &[]byte{3}, - }, - sentinelError: errTest, - errorMessage: "handling justification: verifying block number 2 justification: test error", - }, - "trie_state_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - blockHeader := types.Header{StateRoot: common.Hash{2}} - block := &types.Block{Header: blockHeader} - blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) - blockState.EXPECT().AddBlockToBlockTree(block).Return(nil) - - storageState := NewMockStorageState(ctrl) - storageState.EXPECT().TrieState(&common.Hash{2}). - Return(nil, errTest) - - return chainProcessor{ - blockState: blockState, - storageState: storageState, - } - }, - blockData: types.BlockData{ - Hash: common.Hash{1}, - }, - sentinelError: errTest, - errorMessage: "loading trie state: test error", - }, - "handle_block_import_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - blockHeader := types.Header{StateRoot: common.Hash{2}} - block := &types.Block{Header: blockHeader} - blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) - blockState.EXPECT().AddBlockToBlockTree(block).Return(nil) - - storageState := NewMockStorageState(ctrl) - trieState := storage.NewTrieState(nil) - storageState.EXPECT().TrieState(&common.Hash{2}). - Return(trieState, nil) - - blockImportHandler := NewMockBlockImportHandler(ctrl) - const announceImportedBlock = true - blockImportHandler.EXPECT().HandleBlockImport(block, trieState, announceImportedBlock). 
- Return(errTest) - - return chainProcessor{ - blockState: blockState, - storageState: storageState, - blockImportHandler: blockImportHandler, - } - }, - blockData: types.BlockData{ - Hash: common.Hash{1}, - }, - announceImportedBlock: true, - sentinelError: errTest, - errorMessage: "handling block import: test error", - }, - "success": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - blockState := NewMockBlockState(ctrl) - blockHeader := types.Header{StateRoot: common.Hash{2}} - block := &types.Block{Header: blockHeader} - blockState.EXPECT().GetBlockByHash(common.Hash{1}).Return(block, nil) - blockState.EXPECT().AddBlockToBlockTree(block).Return(nil) - - storageState := NewMockStorageState(ctrl) - trieState := storage.NewTrieState(nil) - storageState.EXPECT().TrieState(&common.Hash{2}). - Return(trieState, nil) - - blockImportHandler := NewMockBlockImportHandler(ctrl) - const announceImportedBlock = true - blockImportHandler.EXPECT().HandleBlockImport(block, trieState, announceImportedBlock). 
- Return(nil) - - return chainProcessor{ - blockState: blockState, - storageState: storageState, - blockImportHandler: blockImportHandler, - } - }, - blockData: types.BlockData{ - Hash: common.Hash{1}, - }, - announceImportedBlock: true, - }, - } - - for name, testCase := range testCases { - testCase := testCase - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - processor := testCase.chainProcessorBuilder(ctrl) - - err := processor.processBlockDataWithStateHeaderAndBody( - testCase.blockData, testCase.announceImportedBlock) - - assert.ErrorIs(t, err, testCase.sentinelError) - if testCase.sentinelError != nil { - assert.EqualError(t, err, testCase.errorMessage) - } - }) - } -} - -func Test_chainProcessor_processBlockDataWithHeaderAndBody(t *testing.T) { - t.Parallel() - - errTest := errors.New("test error") - - testCases := map[string]struct { - chainProcessorBuilder func(ctrl *gomock.Controller) chainProcessor - blockData types.BlockData - announceImportedBlock bool - sentinelError error - errorMessage string - }{ - "verify_block_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - babeVerifier := NewMockBabeVerifier(ctrl) - babeVerifier.EXPECT().VerifyBlock(&types.Header{Number: 1}). - Return(errTest) - - return chainProcessor{ - babeVerifier: babeVerifier, - } - }, - blockData: types.BlockData{ - Header: &types.Header{Number: 1}, - }, - sentinelError: errTest, - errorMessage: "babe verifying block: test error", - }, - "handle_block_error": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - babeVerifier := NewMockBabeVerifier(ctrl) - expectedHeader := &types.Header{ParentHash: common.Hash{1}} - babeVerifier.EXPECT().VerifyBlock(expectedHeader). - Return(nil) - - transactionState := NewMockTransactionState(ctrl) - transactionState.EXPECT().RemoveExtrinsic(types.Extrinsic{2}) - - blockState := NewMockBlockState(ctrl) - blockState.EXPECT().GetHeader(common.Hash{1}). 
- Return(nil, errTest) - - return chainProcessor{ - babeVerifier: babeVerifier, - transactionState: transactionState, - blockState: blockState, - } - }, - blockData: types.BlockData{ - Header: &types.Header{ParentHash: common.Hash{1}}, - Body: &types.Body{{2}}, - }, - sentinelError: errFailedToGetParent, - errorMessage: "handling block: failed to get parent header: test error", - }, - "success": { - chainProcessorBuilder: func(ctrl *gomock.Controller) chainProcessor { - babeVerifier := NewMockBabeVerifier(ctrl) - expectedHeader := &types.Header{ - ParentHash: common.Hash{1}, - Number: 5, - } - babeVerifier.EXPECT().VerifyBlock(expectedHeader). - Return(nil) - - transactionState := NewMockTransactionState(ctrl) - transactionState.EXPECT().RemoveExtrinsic(types.Extrinsic{2}) - - blockState := NewMockBlockState(ctrl) - parentHeader := &types.Header{StateRoot: trie.EmptyHash} - blockState.EXPECT().GetHeader(common.Hash{1}). - Return(parentHeader, nil) - - storageState := NewMockStorageState(ctrl) - lockCall := storageState.EXPECT().Lock() - storageState.EXPECT().Unlock().After(lockCall) - trieState := storage.NewTrieState(nil) - storageState.EXPECT().TrieState(&trie.EmptyHash). - Return(trieState, nil) - - parentHeaderHash := parentHeader.Hash() - instance := NewMockInstance(ctrl) - blockState.EXPECT().GetRuntime(parentHeaderHash). - Return(instance, nil) - - instance.EXPECT().SetContextStorage(trieState) - block := &types.Block{ - Header: *expectedHeader, - Body: types.Body{{2}}, - } - instance.EXPECT().ExecuteBlock(block).Return(nil, nil) - - blockImportHandler := NewMockBlockImportHandler(ctrl) - const announceImportedBlock = true - blockImportHandler.EXPECT().HandleBlockImport(block, trieState, announceImportedBlock). 
- Return(nil) - - telemetryClient := NewMockTelemetry(ctrl) - headerHash := common.MustHexToHash("0x18d21d2901e4a4ac6a8c6431da2dfee1b8701f31a9e49283a082e6c744d4117c") - message := telemetry.NewBlockImport(&headerHash, expectedHeader.Number, "NetworkInitialSync") - telemetryClient.EXPECT().SendMessage(message) - - return chainProcessor{ - babeVerifier: babeVerifier, - transactionState: transactionState, - blockState: blockState, - storageState: storageState, - blockImportHandler: blockImportHandler, - telemetry: telemetryClient, - } - }, - blockData: types.BlockData{ - Header: &types.Header{ - ParentHash: common.Hash{1}, - Number: 5, - }, - Body: &types.Body{{2}}, - }, - announceImportedBlock: true, - }, - } - - for name, testCase := range testCases { - testCase := testCase - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - - processor := testCase.chainProcessorBuilder(ctrl) - - err := processor.processBlockDataWithHeaderAndBody( - testCase.blockData, testCase.announceImportedBlock) - - assert.ErrorIs(t, err, testCase.sentinelError) - if testCase.sentinelError != nil { - assert.EqualError(t, err, testCase.errorMessage) - } - }) - } -} - -func Test_chainProcessor_processReadyBlocks(t *testing.T) { - t.Parallel() - mockError := errors.New("test mock error") - tests := map[string]struct { - chainSyncBuilder func(ctrl *gomock.Controller) ChainSync - blockStateBuilder func(ctrl *gomock.Controller, done chan struct{}) BlockState - blockData *types.BlockData - babeVerifierBuilder func(ctrl *gomock.Controller) BabeVerifier - pendingBlockBuilder func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet - storageStateBuilder func(ctrl *gomock.Controller, done chan struct{}) StorageState - }{ - "base_case": { - chainSyncBuilder: func(ctrl *gomock.Controller) ChainSync { - cs := NewMockChainSync(ctrl) - cs.EXPECT().syncState().Return(bootstrap) - return cs - }, - blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) 
BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().CompareAndSetBlockData(&types.BlockData{}).DoAndReturn(func(*types. - BlockData) error { - close(done) - return nil - }) - return mockBlockState - }, - blockData: &types.BlockData{ - Hash: common.Hash{}, - }, - babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { - return nil - }, - pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { - return nil - }, - storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { - return nil - }, - }, - "add_block": { - chainSyncBuilder: func(ctrl *gomock.Controller) ChainSync { - cs := NewMockChainSync(ctrl) - cs.EXPECT().syncState().Return(bootstrap) - return cs - }, - blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(nil, mockError) - return mockBlockState - }, - blockData: &types.BlockData{ - Hash: common.Hash{}, - Header: &types.Header{}, - Body: &types.Body{}, - }, - babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(nil) - return mockBabeVerifier - }, - pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { - mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) - mockDisjointBlockSet.EXPECT().addBlock(&types.Block{ - Header: types.Header{}, - Body: types.Body{}, - }).DoAndReturn(func(block *types.Block) error { - close(done) - return nil - }) - return mockDisjointBlockSet - }, - 
storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { - return nil - }, - }, - "error_in_process_block": { - chainSyncBuilder: func(ctrl *gomock.Controller) ChainSync { - cs := NewMockChainSync(ctrl) - cs.EXPECT().syncState().Return(bootstrap) - return cs - }, - blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(&types.Header{}, nil) - return mockBlockState - }, - blockData: &types.BlockData{ - Hash: common.Hash{}, - Header: &types.Header{}, - Body: &types.Body{}, - }, - babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(nil) - return mockBabeVerifier - }, - pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { - return nil - }, - storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { - mockStorageState := NewMockStorageState(ctrl) - mockStorageState.EXPECT().Lock() - mockStorageState.EXPECT().Unlock() - mockStorageState.EXPECT().TrieState(&common.Hash{}).DoAndReturn(func(hash *common.Hash) (*storage. 
- TrieState, error) { - close(done) - return nil, mockError - }) - return mockStorageState - }, - }, - "add_block_error": { - chainSyncBuilder: func(ctrl *gomock.Controller) ChainSync { - cs := NewMockChainSync(ctrl) - cs.EXPECT().syncState().Return(bootstrap) - return cs - }, - blockStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().HasBlockBody(common.Hash{}).Return(false, nil) - mockBlockState.EXPECT().GetHeader(common.Hash{}).Return(nil, mockError) - return mockBlockState - }, - blockData: &types.BlockData{ - Hash: common.Hash{}, - Header: &types.Header{}, - Body: &types.Body{}, - }, - babeVerifierBuilder: func(ctrl *gomock.Controller) BabeVerifier { - mockBabeVerifier := NewMockBabeVerifier(ctrl) - mockBabeVerifier.EXPECT().VerifyBlock(&types.Header{}).Return(nil) - return mockBabeVerifier - }, - pendingBlockBuilder: func(ctrl *gomock.Controller, done chan struct{}) DisjointBlockSet { - mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) - mockDisjointBlockSet.EXPECT().addBlock(&types.Block{ - Header: types.Header{}, - Body: types.Body{}, - }).DoAndReturn(func(block *types.Block) error { - close(done) - return mockError - }) - return mockDisjointBlockSet - }, - storageStateBuilder: func(ctrl *gomock.Controller, done chan struct{}) StorageState { - return nil - }, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - ctx, cancel := context.WithCancel(context.Background()) - readyBlock := newBlockQueue(5) - done := make(chan struct{}) - - s := &chainProcessor{ - ctx: ctx, - cancel: cancel, - readyBlocks: readyBlock, - chainSync: tt.chainSyncBuilder(ctrl), - blockState: tt.blockStateBuilder(ctrl, done), - babeVerifier: tt.babeVerifierBuilder(ctrl), - pendingBlocks: tt.pendingBlockBuilder(ctrl, done), - storageState: 
tt.storageStateBuilder(ctrl, done), - } - - go s.processReadyBlocks() - - readyBlock.push(tt.blockData) - <-done - s.cancel() - }) - } -} - -func Test_newChainProcessor(t *testing.T) { - t.Parallel() - - mockReadyBlock := newBlockQueue(5) - mockDisjointBlockSet := NewMockDisjointBlockSet(nil) - mockBlockState := NewMockBlockState(nil) - mockStorageState := NewMockStorageState(nil) - mockTransactionState := NewMockTransactionState(nil) - mockBabeVerifier := NewMockBabeVerifier(nil) - mockFinalityGadget := NewMockFinalityGadget(nil) - mockBlockImportHandler := NewMockBlockImportHandler(nil) - - type args struct { - readyBlocks *blockQueue - pendingBlocks DisjointBlockSet - blockState BlockState - storageState StorageState - transactionState TransactionState - babeVerifier BabeVerifier - finalityGadget FinalityGadget - blockImportHandler BlockImportHandler - } - tests := []struct { - name string - args args - want *chainProcessor - }{ - { - name: "with_args", - args: args{ - readyBlocks: mockReadyBlock, - pendingBlocks: mockDisjointBlockSet, - blockState: mockBlockState, - storageState: mockStorageState, - transactionState: mockTransactionState, - babeVerifier: mockBabeVerifier, - finalityGadget: mockFinalityGadget, - blockImportHandler: mockBlockImportHandler, - }, - want: &chainProcessor{ - readyBlocks: mockReadyBlock, - pendingBlocks: mockDisjointBlockSet, - blockState: mockBlockState, - storageState: mockStorageState, - transactionState: mockTransactionState, - babeVerifier: mockBabeVerifier, - finalityGadget: mockFinalityGadget, - blockImportHandler: mockBlockImportHandler, - }, - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - cpCfg := chainProcessorConfig{ - readyBlocks: tt.args.readyBlocks, - pendingBlocks: tt.args.pendingBlocks, - blockState: tt.args.blockState, - storageState: tt.args.storageState, - transactionState: tt.args.transactionState, - babeVerifier: tt.args.babeVerifier, - finalityGadget: 
tt.args.finalityGadget, - blockImportHandler: tt.args.blockImportHandler, - } - - got := newChainProcessor(cpCfg) - assert.NotNil(t, got.ctx) - got.ctx = nil - assert.NotNil(t, got.cancel) - got.cancel = nil - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/dot/sync/chain_sync.go b/dot/sync/chain_sync.go index 48e5c1232e..73372ac9d7 100644 --- a/dot/sync/chain_sync.go +++ b/dot/sync/chain_sync.go @@ -4,16 +4,16 @@ package sync import ( - "context" - "crypto/rand" + "bytes" "errors" "fmt" "math/big" "strings" "sync" + "sync/atomic" "time" - "github.com/ChainSafe/chaindb" + "github.com/cockroachdb/pebble" "github.com/libp2p/go-libp2p/core/peer" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -21,18 +21,14 @@ import ( "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/dot/peerset" + "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/blocktree" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/common/variadic" ) -const ( - // maxWorkers is the maximum number of parallel sync workers - maxWorkers = 12 -) - -var _ ChainSync = &chainSync{} +var _ ChainSync = (*chainSync)(nil) type chainSyncState byte @@ -53,692 +49,716 @@ func (s chainSyncState) String() string { } var ( - pendingBlocksLimit = maxResponseSize * 32 + pendingBlocksLimit = network.MaxBlocksInResponse * 32 isSyncedGauge = promauto.NewGauge(prometheus.GaugeOpts{ Namespace: "gossamer_network_syncer", Name: "is_synced", Help: "bool representing whether the node is synced to the head of the chain", }) + + blockSizeGauge = promauto.NewGauge(prometheus.GaugeOpts{ + Namespace: "gossamer_sync", + Name: "block_size", + Help: 
"represent the size of blocks synced", + }) ) -// peerState tracks our peers's best reported blocks -type peerState struct { +// peerView tracks our peers's best reported blocks +type peerView struct { who peer.ID hash common.Hash number uint } -// workHandler handles new potential work (ie. reported peer state, block announces), results from dispatched workers, -// and stored pending work (ie. pending blocks set) -// workHandler should be implemented by `bootstrapSync` and `tipSync` -type workHandler interface { - // handleNewPeerState returns a new worker based on a peerState. - // The worker may be nil in which case we do nothing. - handleNewPeerState(*peerState) (*worker, error) - - // handleWorkerResult handles the result of a worker, which may be - // nil or error. It optionally returns a new worker to be dispatched. - handleWorkerResult(w *worker) (workerToRetry *worker, err error) - - // hasCurrentWorker is called before a worker is to be dispatched to - // check whether it is a duplicate. 
this function returns whether there is - // a worker that covers the scope of the proposed worker; if true, - // ignore the proposed worker - hasCurrentWorker(*worker, map[uint64]*worker) bool - - // handleTick handles a timer tick - handleTick() ([]*worker, error) -} - // ChainSync contains the methods used by the high-level service into the `chainSync` module type ChainSync interface { start() stop() - // called upon receiving a BlockAnnounce - setBlockAnnounce(from peer.ID, header *types.Header) error - // called upon receiving a BlockAnnounceHandshake - setPeerHead(p peer.ID, hash common.Hash, number uint) error + setPeerHead(p peer.ID, hash common.Hash, number uint) - // syncState returns the current syncing state - syncState() chainSyncState + // getSyncMode returns the current syncing state + getSyncMode() chainSyncState // getHighestBlock returns the highest block or an error getHighestBlock() (highestBlock uint, err error) + + onBlockAnnounce(announcedBlock) error +} + +type announcedBlock struct { + who peer.ID + header *types.Header } type chainSync struct { - ctx context.Context - cancel context.CancelFunc + stopCh chan struct{} blockState BlockState network Network - // queue of work created by setting peer heads - workQueue chan *peerState - - // workers are put here when they are completed so we can handle their result - resultQueue chan *worker + workerPool *syncWorkerPool // tracks the latest state we know of from our peers, // ie. 
their best block hash and number - sync.RWMutex - peerState map[peer.ID]*peerState - ignorePeers map[peer.ID]struct{} - - // current workers that are attempting to obtain blocks - workerState *workerState - - // blocks which are ready to be processed are put into this queue - // the `chainProcessor` will read from this channel and process the blocks - // note: blocks must not be put into this channel unless their parent is known - // - // there is a case where we request and process "duplicate" blocks, which is where there - // are some blocks in this queue, and at the same time, the bootstrap worker errors and dispatches - // a new worker with start=(current best head), which results in the blocks in the queue - // getting re-requested (as they have not been processed yet) - // to fix this, we track the blocks that are in the queue - readyBlocks *blockQueue + peerViewLock sync.RWMutex + peerView map[peer.ID]*peerView // disjoint set of blocks which are known but not ready to be processed // ie. 
we only know the hash, number, or the parent block is unknown, or the body is unknown // note: the block may have empty fields, as some data about it may be unknown - pendingBlocks DisjointBlockSet - pendingBlockDoneCh chan<- struct{} - - // bootstrap or tip (near-head) - state chainSyncState + pendingBlocks DisjointBlockSet - // handler is set to either `bootstrapSyncer` or `tipSyncer`, depending on the current - // chain sync state - handler workHandler - - benchmarker *syncBenchmarker + syncMode atomic.Value finalisedCh <-chan *types.FinalisationInfo - minPeers int - maxWorkerRetries uint16 - slotDuration time.Duration - - logSyncTicker *time.Ticker - logSyncTickerC <-chan time.Time // channel as field for unit testing - logSyncStarted bool - logSyncDone chan struct{} - badBlocks []string + minPeers int + slotDuration time.Duration - blockReqRes network.RequestMaker + storageState StorageState + transactionState TransactionState + babeVerifier BabeVerifier + finalityGadget FinalityGadget + blockImportHandler BlockImportHandler + telemetry Telemetry + badBlocks []string + requestMaker network.RequestMaker } type chainSyncConfig struct { bs BlockState net Network - readyBlocks *blockQueue + requestMaker network.RequestMaker pendingBlocks DisjointBlockSet minPeers, maxPeers int slotDuration time.Duration + storageState StorageState + transactionState TransactionState + babeVerifier BabeVerifier + finalityGadget FinalityGadget + blockImportHandler BlockImportHandler + telemetry Telemetry badBlocks []string } -func newChainSync(cfg chainSyncConfig, blockReqRes network.RequestMaker) *chainSync { - ctx, cancel := context.WithCancel(context.Background()) - const syncSamplesToKeep = 30 - const logSyncPeriod = 5 * time.Second - logSyncTicker := time.NewTicker(logSyncPeriod) - +func newChainSync(cfg chainSyncConfig) *chainSync { + atomicState := atomic.Value{} + atomicState.Store(tip) return &chainSync{ - ctx: ctx, - cancel: cancel, - blockState: cfg.bs, - network: 
cfg.net, - workQueue: make(chan *peerState, 1024), - resultQueue: make(chan *worker, 1024), - peerState: make(map[peer.ID]*peerState), - ignorePeers: make(map[peer.ID]struct{}), - workerState: newWorkerState(), - readyBlocks: cfg.readyBlocks, - pendingBlocks: cfg.pendingBlocks, - state: bootstrap, - handler: newBootstrapSyncer(cfg.bs), - benchmarker: newSyncBenchmarker(syncSamplesToKeep), - finalisedCh: cfg.bs.GetFinalisedNotifierChannel(), - minPeers: cfg.minPeers, - maxWorkerRetries: uint16(cfg.maxPeers), - slotDuration: cfg.slotDuration, - logSyncTicker: logSyncTicker, - logSyncTickerC: logSyncTicker.C, - logSyncDone: make(chan struct{}), - badBlocks: cfg.badBlocks, - blockReqRes: blockReqRes, + stopCh: make(chan struct{}), + storageState: cfg.storageState, + transactionState: cfg.transactionState, + babeVerifier: cfg.babeVerifier, + finalityGadget: cfg.finalityGadget, + blockImportHandler: cfg.blockImportHandler, + telemetry: cfg.telemetry, + blockState: cfg.bs, + network: cfg.net, + peerView: make(map[peer.ID]*peerView), + pendingBlocks: cfg.pendingBlocks, + syncMode: atomicState, + finalisedCh: cfg.bs.GetFinalisedNotifierChannel(), + minPeers: cfg.minPeers, + slotDuration: cfg.slotDuration, + workerPool: newSyncWorkerPool(cfg.net, cfg.requestMaker), + badBlocks: cfg.badBlocks, + requestMaker: cfg.requestMaker, } } -func (cs *chainSync) start() { - // wait until we have received at least `minPeers` peer heads - for { - cs.RLock() - n := len(cs.peerState) - cs.RUnlock() - if n >= cs.minPeers { - break - } - time.Sleep(time.Millisecond * 100) - } +func (cs *chainSync) waitEnoughPeersAndTarget() <-chan struct{} { + resultCh := make(chan struct{}) + go func() { + defer close(resultCh) + for { + select { + case <-resultCh: + return + default: + } - isSyncedGauge.Set(float64(cs.state)) + cs.workerPool.useConnectedPeers() + _, err := cs.getTarget() + totalAvailable := cs.workerPool.totalWorkers() + if totalAvailable >= uint(cs.minPeers) && err == nil { + return + } - 
pendingBlockDoneCh := make(chan struct{}) - cs.pendingBlockDoneCh = pendingBlockDoneCh - go cs.pendingBlocks.run(pendingBlockDoneCh) - go cs.sync() - cs.logSyncStarted = true - go cs.logSyncSpeed() -} + time.Sleep(100 * time.Millisecond) + } + }() -func (cs *chainSync) stop() { - if cs.pendingBlockDoneCh != nil { - close(cs.pendingBlockDoneCh) - } - cs.cancel() - if cs.logSyncStarted { - <-cs.logSyncDone - } + return resultCh } -func (cs *chainSync) syncState() chainSyncState { - return cs.state -} +func (cs *chainSync) start() { + isSyncedGauge.Set(0) -func (cs *chainSync) setBlockAnnounce(from peer.ID, header *types.Header) error { - // check if we already know of this block, if not, - // add to pendingBlocks set - has, err := cs.blockState.HasHeader(header.Hash()) - if err != nil { - return err - } + // wait until we have a minimal workers in the sync worker pool + resultCh := cs.waitEnoughPeersAndTarget() - if has { - return blocktree.ErrBlockExists + select { + case <-resultCh: + case <-cs.stopCh: + return } - if err = cs.pendingBlocks.addHeader(header); err != nil { - return err - } + go cs.pendingBlocks.run(cs.finalisedCh, cs.stopCh) + go cs.workerPool.listenForRequests(cs.stopCh) - // we assume that if a peer sends us a block announce for a certain block, - // that is also has the chain up until and including that block. - // this may not be a valid assumption, but perhaps we can assume that - // it is likely they will receive this block and its ancestors before us. 
- return cs.setPeerHead(from, header.Hash(), header.Number) + cs.syncMode.Store(bootstrap) + go cs.bootstrapSync() +} + +func (cs *chainSync) stop() { + close(cs.stopCh) + <-cs.workerPool.doneCh } -// setPeerHead sets a peer's best known block and potentially adds the peer's state to the workQueue -func (cs *chainSync) setPeerHead(p peer.ID, hash common.Hash, number uint) error { - ps := &peerState{ - who: p, - hash: hash, - number: number, +func (cs *chainSync) isFarFromTarget() (bestBlockHeader *types.Header, syncTarget uint, + isFarFromTarget bool, err error) { + syncTarget, err = cs.getTarget() + if err != nil { + return nil, syncTarget, false, fmt.Errorf("getting target: %w", err) } - cs.Lock() - cs.peerState[p] = ps - cs.Unlock() - // if the peer reports a lower or equal best block number than us, - // check if they are on a fork or not - head, err := cs.blockState.BestBlockHeader() + bestBlockHeader, err = cs.blockState.BestBlockHeader() if err != nil { - return fmt.Errorf("best block header: %w", err) + return nil, syncTarget, false, fmt.Errorf("getting best block header: %w", err) } - if ps.number <= head.Number { - // check if our block hash for that number is the same, if so, do nothing - // as we already have that block - ourHash, err := cs.blockState.GetHashByNumber(ps.number) - if err != nil { - return fmt.Errorf("get block hash by number: %w", err) + bestBlockNumber := bestBlockHeader.Number + isFarFromTarget = bestBlockNumber+network.MaxBlocksInResponse < syncTarget + return bestBlockHeader, syncTarget, isFarFromTarget, nil +} + +func (cs *chainSync) bootstrapSync() { + for { + select { + case <-cs.stopCh: + logger.Warn("ending bootstrap sync, chain sync stop channel triggered") + return + default: } - if ourHash == ps.hash { - return nil + bestBlockHeader, syncTarget, isFarFromTarget, err := cs.isFarFromTarget() + if err != nil && !errors.Is(err, errNoPeerViews) { + logger.Criticalf("ending bootstrap sync, checking target distance: %s", err) + 
return } - // check if their best block is on an invalid chain, if it is, - // potentially downscore them - // for now, we can remove them from the syncing peers set - fin, err := cs.blockState.GetHighestFinalisedHeader() + finalisedHeader, err := cs.blockState.GetHighestFinalisedHeader() if err != nil { - return fmt.Errorf("get highest finalised header: %w", err) + logger.Criticalf("getting highest finalized header: %w", err) + return } - // their block hash doesn't match ours for that number (ie. they are on a different - // chain), and also the highest finalised block is higher than that number. - // thus the peer is on an invalid chain - if fin.Number >= ps.number { - // TODO: downscore this peer, or temporarily don't sync from them? (#1399) - // perhaps we need another field in `peerState` to mark whether the state is valid or not - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadBlockAnnouncementValue, - Reason: peerset.BadBlockAnnouncementReason, - }, p) - return fmt.Errorf("%w: for peer %s and block number %d", - errPeerOnInvalidFork, p, ps.number) + logger.Infof( + "🚣 currently syncing, %d peers connected, "+ + "%d available workers, "+ + "target block number %d, "+ + "finalised block number %d with hash %s", + len(cs.network.Peers()), + cs.workerPool.totalWorkers(), + syncTarget, finalisedHeader.Number, finalisedHeader.Hash()) + + if isFarFromTarget { + cs.workerPool.useConnectedPeers() + err = cs.requestMaxBlocksFrom(bestBlockHeader) + if err != nil { + logger.Errorf("requesting max blocks from best block header: %s", err) + } + } else { + // we are less than 128 blocks behind the target we can use tip sync + cs.syncMode.Store(tip) + isSyncedGauge.Set(1) + logger.Debugf("switched sync mode to %d", tip.String()) + return } + } +} - // peer is on a fork, check if we have processed the fork already or not - // ie. is their block written to our db? 
- has, err := cs.blockState.HasHeader(ps.hash) - if err != nil { - return fmt.Errorf("has header: %w", err) - } +func (cs *chainSync) getSyncMode() chainSyncState { + return cs.syncMode.Load().(chainSyncState) +} - // if so, do nothing, as we already have their fork - if has { - return nil - } +// setPeerHead sets a peer's best known block +func (cs *chainSync) setPeerHead(who peer.ID, bestHash common.Hash, bestNumber uint) { + cs.workerPool.fromBlockAnnounce(who) + + cs.peerViewLock.Lock() + defer cs.peerViewLock.Unlock() + + logger.Debugf("sync peer view: %s, best hash: %s, best number: #%d", who, bestHash.Short(), bestNumber) + cs.peerView[who] = &peerView{ + who: who, + hash: bestHash, + number: bestNumber, + } +} + +func (cs *chainSync) onBlockAnnounce(announced announcedBlock) error { + if cs.pendingBlocks.hasBlock(announced.header.Hash()) { + return fmt.Errorf("%w: block %s (#%d)", + errAlreadyInDisjointSet, announced.header.Hash(), announced.header.Number) + } + + err := cs.pendingBlocks.addHeader(announced.header) + if err != nil { + return fmt.Errorf("while adding pending block header: %w", err) + } + + if cs.getSyncMode() == bootstrap { + return nil + } + + _, _, isFarFromTarget, err := cs.isFarFromTarget() + if err != nil && !errors.Is(err, errNoPeerViews) { + return fmt.Errorf("checking target distance: %w", err) } - // the peer has a higher best block than us, or they are on some fork we are not aware of - // add it to the disjoint block set - if err = cs.pendingBlocks.addHashAndNumber(ps.hash, ps.number); err != nil { - return fmt.Errorf("add hash and number: %w", err) + if !isFarFromTarget { + return cs.requestAnnouncedBlock(announced) } - cs.workQueue <- ps - logger.Debugf("set peer %s head with block number %d and hash %s", p, number, hash) + // we are more than 128 blocks behind the head, switch to bootstrap + cs.syncMode.Store(bootstrap) + isSyncedGauge.Set(0) + logger.Debugf("switched sync mode to %d", bootstrap.String()) + go 
cs.bootstrapSync() return nil } -func (cs *chainSync) logSyncSpeed() { - defer close(cs.logSyncDone) - defer cs.logSyncTicker.Stop() +func (cs *chainSync) requestAnnouncedBlock(announce announcedBlock) error { + peerWhoAnnounced := announce.who + announcedHash := announce.header.Hash() + announcedNumber := announce.header.Number - for { - before, err := cs.blockState.BestBlockHeader() - if err != nil { - continue - } + has, err := cs.blockState.HasHeader(announcedHash) + if err != nil { + return fmt.Errorf("checking if header exists: %s", err) + } - if cs.state == bootstrap { - cs.benchmarker.begin(time.Now(), before.Number) - } + if has { + return nil + } - select { - case <-cs.logSyncTickerC: // channel of cs.logSyncTicker - case <-cs.ctx.Done(): - return - } + bestBlockHeader, err := cs.blockState.BestBlockHeader() + if err != nil { + return fmt.Errorf("getting best block header: %w", err) + } - finalised, err := cs.blockState.GetHighestFinalisedHeader() + // if the announced block contains a lower number than our best + // block header, let's check if it is greater than our latests + // finalized header, if so this block belongs to a fork chain + if announcedNumber < bestBlockHeader.Number { + highestFinalizedHeader, err := cs.blockState.GetHighestFinalisedHeader() if err != nil { - continue + return fmt.Errorf("getting highest finalized header") } - after, err := cs.blockState.BestBlockHeader() - if err != nil { - continue + // ignore the block if it has the same or lower number + // TODO: is it following the protocol to send a blockAnnounce with number < highestFinalized number? + if announcedNumber <= highestFinalizedHeader.Number { + return nil } - switch cs.state { - case bootstrap: - cs.benchmarker.end(time.Now(), after.Number) - target := cs.getTarget() - - logger.Infof( - "🔗 imported blocks from %d to %d (hashes [%s ... 
%s])", - before.Number, after.Number, before.Hash(), after.Hash()) - - logger.Infof( - "🚣 currently syncing, %d peers connected, "+ - "target block number %d, %.2f average blocks/second, "+ - "%.2f overall average, finalised block number %d with hash %s", - len(cs.network.Peers()), - target, cs.benchmarker.mostRecentAverage(), - cs.benchmarker.average(), finalised.Number, finalised.Hash()) - case tip: - logger.Infof( - "💤 node waiting, %d peers connected, "+ - "head block number %d with hash %s, "+ - "finalised block number %d with hash %s", - len(cs.network.Peers()), - after.Number, after.Hash(), - finalised.Number, finalised.Hash()) - } + return cs.requestForkBlocks(bestBlockHeader, highestFinalizedHeader, announce.header, announce.who) } -} -func (cs *chainSync) ignorePeer(who peer.ID) { - if err := who.Validate(); err != nil { - return + err = cs.requestChainBlocks(announce.header, bestBlockHeader, peerWhoAnnounced) + if err != nil { + return fmt.Errorf("requesting chain blocks: %w", err) } - cs.Lock() - cs.ignorePeers[who] = struct{}{} - cs.Unlock() + highestFinalizedHeader, err := cs.blockState.GetHighestFinalisedHeader() + if err != nil { + return fmt.Errorf("while getting highest finalized header: %w", err) + } + + err = cs.requestPendingBlocks(highestFinalizedHeader) + if err != nil { + return fmt.Errorf("while requesting pending blocks") + } + + return nil } -func (cs *chainSync) sync() { - // set to slot time - ticker := time.NewTicker(cs.slotDuration) +func (cs *chainSync) requestChainBlocks(announcedHeader, bestBlockHeader *types.Header, + peerWhoAnnounced peer.ID) error { + gapLength := uint32(announcedHeader.Number - bestBlockHeader.Number) + startAtBlock := announcedHeader.Number + totalBlocks := uint32(1) - for { - select { - case ps := <-cs.workQueue: - cs.maybeSwitchMode() + var request *network.BlockRequestMessage + startingBlock := *variadic.MustNewUint32OrHash(announcedHeader.Hash()) - if err := cs.handleWork(ps); err != nil { - 
logger.Errorf("failed to handle chain sync work: %s", err) - } - case res := <-cs.resultQueue: - if err := cs.handleResult(res); err != nil { - logger.Errorf("failed to handle chain sync result: %s", err) - } - case <-ticker.C: - cs.maybeSwitchMode() + if gapLength > 1 { + request = network.NewBlockRequest(startingBlock, gapLength, + network.BootstrapRequestData, network.Descending) - workers, err := cs.handler.handleTick() - if err != nil { - logger.Errorf("failed to handle tick: %s", err) - continue - } + startAtBlock = announcedHeader.Number - uint(*request.Max) + 1 + totalBlocks = *request.Max - for _, worker := range workers { - cs.tryDispatchWorker(worker) - } - case fin := <-cs.finalisedCh: - // on finalised block, call pendingBlocks.removeLowerBlocks() to remove blocks on - // invalid forks from the pending blocks set - cs.pendingBlocks.removeLowerBlocks(fin.Header.Number) - case <-cs.ctx.Done(): - return - } + logger.Debugf("received a block announce from %s, requesting %d blocks, descending request from %s (#%d)", + peerWhoAnnounced, gapLength, announcedHeader.Hash(), announcedHeader.Number) + } else { + request = network.NewBlockRequest(startingBlock, 1, network.BootstrapRequestData, network.Descending) + logger.Debugf("received a block announce from %s, requesting a single block %s (#%d)", + peerWhoAnnounced, announcedHeader.Hash(), announcedHeader.Number) } -} -func (cs *chainSync) maybeSwitchMode() { - head, err := cs.blockState.BestBlockHeader() + resultsQueue := make(chan *syncTaskResult) + cs.workerPool.submitBoundedRequest(request, peerWhoAnnounced, resultsQueue) + err := cs.handleWorkersResults(resultsQueue, startAtBlock, totalBlocks) if err != nil { - logger.Errorf("failed to get best block header: %s", err) - return + return fmt.Errorf("while handling workers results: %w", err) } - target := cs.getTarget() - switch { - case head.Number+maxResponseSize < target: - // we are at least 128 blocks behind the head, switch to bootstrap - 
cs.setMode(bootstrap) - case head.Number >= target: - // bootstrap complete, switch state to tip if not already - // and begin near-head fork-sync - cs.setMode(tip) - default: - // head is between (target-128, target), and we don't want to switch modes. - } + return nil } -func (cs *chainSync) handleResult(resultWorker *worker) error { - // delete worker from workers map - cs.workerState.delete(resultWorker.id) +func (cs *chainSync) requestForkBlocks(bestBlockHeader, highestFinalizedHeader, announcedHeader *types.Header, + peerWhoAnnounced peer.ID) error { + logger.Debugf("block announce lower than best block %s (#%d) and greater highest finalized %s (#%d)", + bestBlockHeader.Hash(), bestBlockHeader.Number, highestFinalizedHeader.Hash(), highestFinalizedHeader.Number) - // handle results from worker - // if there is an error, potentially retry the worker - if resultWorker.err == nil || resultWorker.ctx.Err() != nil { - return nil //nolint:nilerr + parentExists, err := cs.blockState.HasHeader(announcedHeader.ParentHash) + if err != nil && !errors.Is(err, pebble.ErrNotFound) { + return fmt.Errorf("while checking header exists: %w", err) } - logger.Debugf("worker id %d failed: %s", resultWorker.id, resultWorker.err.err) + gapLength := uint32(1) + startAtBlock := announcedHeader.Number + announcedHash := announcedHeader.Hash() + var request *network.BlockRequestMessage + startingBlock := *variadic.MustNewUint32OrHash(announcedHash) - // handle errors. in the case that a peer did not respond to us in time, - // temporarily add them to the ignore list. 
- switch { - case errors.Is(resultWorker.err.err, context.Canceled): - return nil - case errors.Is(resultWorker.err.err, errNoPeers): - logger.Debugf("worker id %d not able to sync with any peer", resultWorker.id) - return nil - case errors.Is(resultWorker.err.err, context.DeadlineExceeded): - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.TimeOutValue, - Reason: peerset.TimeOutReason, - }, resultWorker.err.who) - cs.ignorePeer(resultWorker.err.who) - case strings.Contains(resultWorker.err.err.Error(), "dial backoff"): - cs.ignorePeer(resultWorker.err.who) - return nil - case resultWorker.err.err.Error() == "protocol not supported": - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadProtocolValue, - Reason: peerset.BadProtocolReason, - }, resultWorker.err.who) - cs.ignorePeer(resultWorker.err.who) - return nil + if parentExists { + request = network.NewBlockRequest(startingBlock, 1, network.BootstrapRequestData, network.Descending) + } else { + gapLength = uint32(announcedHeader.Number - highestFinalizedHeader.Number) + startAtBlock = highestFinalizedHeader.Number + 1 + request = network.NewBlockRequest(startingBlock, gapLength, network.BootstrapRequestData, network.Descending) } - worker, err := cs.handler.handleWorkerResult(resultWorker) + logger.Debugf("requesting %d fork blocks, starting at %s (#%d)", + peerWhoAnnounced, gapLength, announcedHash, announcedHeader.Number) + + resultsQueue := make(chan *syncTaskResult) + cs.workerPool.submitBoundedRequest(request, peerWhoAnnounced, resultsQueue) + + err = cs.handleWorkersResults(resultsQueue, startAtBlock, gapLength) if err != nil { - logger.Errorf("failed to handle worker result: %s", err) - return err - } else if worker == nil { - return nil + return fmt.Errorf("while handling workers results: %w", err) } - worker.retryCount = resultWorker.retryCount + 1 - if worker.retryCount > cs.maxWorkerRetries { - logger.Debugf( - "discarding worker id %d: maximum retry count %d 
reached", - worker.id, cs.maxWorkerRetries) - - // if this worker was triggered due to a block in the pending blocks set, - // we want to remove it from the set, as we asked all our peers for it - // and none replied with the info we need. - if worker.pendingBlock != nil { - cs.pendingBlocks.removeBlock(worker.pendingBlock.hash) - } + return nil +} + +func (cs *chainSync) requestPendingBlocks(highestFinalizedHeader *types.Header) error { + pendingBlocksTotal := cs.pendingBlocks.size() + logger.Infof("total of pending blocks: %d", pendingBlocksTotal) + if pendingBlocksTotal < 1 { return nil } - // if we've already tried a peer and there was an error, - // then we shouldn't try them again. - if resultWorker.peersTried != nil { - worker.peersTried = resultWorker.peersTried - } else { - worker.peersTried = make(map[peer.ID]struct{}) + pendingBlocks := cs.pendingBlocks.getBlocks() + for _, pendingBlock := range pendingBlocks { + if pendingBlock.number <= highestFinalizedHeader.Number { + cs.pendingBlocks.removeBlock(pendingBlock.hash) + continue + } + + parentExists, err := cs.blockState.HasHeader(pendingBlock.header.ParentHash) + if err != nil { + return fmt.Errorf("getting pending block parent header: %w", err) + } + + if parentExists { + err := cs.handleReadyBlock(pendingBlock.toBlockData()) + if err != nil { + return fmt.Errorf("handling ready block: %w", err) + } + continue + } + + gapLength := pendingBlock.number - highestFinalizedHeader.Number + if gapLength > 128 { + logger.Warnf("gap of %d blocks, max expected: 128 block", gapLength) + gapLength = 128 + } + + descendingGapRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(pendingBlock.hash), + uint32(gapLength), network.BootstrapRequestData, network.Descending) + startAtBlock := pendingBlock.number - uint(*descendingGapRequest.Max) + 1 + + // the `requests` in the tip sync are not related necessarily + // this is why we need to treat them separately + resultsQueue := make(chan *syncTaskResult) + 
cs.workerPool.submitRequest(descendingGapRequest, resultsQueue) + + // TODO: we should handle the requests concurrently + // a way of achieve that is by constructing a new `handleWorkersResults` for + // handling only tip sync requests + err = cs.handleWorkersResults(resultsQueue, startAtBlock, *descendingGapRequest.Max) + if err != nil { + return fmt.Errorf("while handling workers results: %w", err) + } } - worker.peersTried[resultWorker.err.who] = struct{}{} - cs.tryDispatchWorker(worker) return nil } -// setMode stops all existing workers and clears the worker set and switches the `handler` -// based on the new mode, if the mode is different than previous -func (cs *chainSync) setMode(mode chainSyncState) { - if cs.state == mode { - return +func (cs *chainSync) requestMaxBlocksFrom(bestBlockHeader *types.Header) error { + startRequestAt := bestBlockHeader.Number + 1 + + // we build the set of requests based on the amount of available peers + // in the worker pool, if we have more peers than `maxRequestAllowed` + // so we limit to `maxRequestAllowed` to avoid the error: + // cannot reserve outbound connection: resource limit exceeded + availableWorkers := cs.workerPool.totalWorkers() + if availableWorkers > maxRequestsAllowed { + availableWorkers = maxRequestsAllowed } - // stop all current workers and clear set - cs.workerState.reset() + // targetBlockNumber is the virtual target we will request, however + // we should bound it to the real target which is collected through + // block announces received from other peers + targetBlockNumber := startRequestAt + availableWorkers*128 + realTarget, err := cs.getTarget() + if err != nil { + return fmt.Errorf("while getting target: %w", err) + } - // update handler to respective mode - switch mode { - case bootstrap: - cs.handler = newBootstrapSyncer(cs.blockState) - case tip: - cs.handler = newTipSyncer(cs.blockState, cs.pendingBlocks, cs.readyBlocks, cs.handleReadyBlock) + if targetBlockNumber > realTarget { + // 
basically if our virtual target is beyond the real target + // that means we are only a few requests away, then we + // calculate the correct amount of missing requests and then + // change to tip sync which should take care of the rest + diff := targetBlockNumber - realTarget + numOfRequestsToDrop := (diff / 128) + 1 + targetBlockNumber = targetBlockNumber - (numOfRequestsToDrop * 128) + } + + requests := network.NewAscendingBlockRequests(startRequestAt, targetBlockNumber, + network.BootstrapRequestData) + + var expectedAmountOfBlocks uint32 + for _, request := range requests { + if request.Max != nil { + expectedAmountOfBlocks += *request.Max + } + } + + resultsQueue := make(chan *syncTaskResult) + cs.workerPool.submitRequests(requests, resultsQueue) + + err = cs.handleWorkersResults(resultsQueue, startRequestAt, expectedAmountOfBlocks) + if err != nil { + return fmt.Errorf("while handling workers results: %w", err) } - cs.state = mode - isSyncedGauge.Set(float64(cs.state)) - logger.Debugf("switched sync mode to %d", mode) + return nil } // getTarget takes the average of all peer heads // TODO: should we just return the highest? 
could be an attack vector potentially, if a peer reports some very large // head block number, it would leave us in bootstrap mode forever // it would be better to have some sort of standard deviation calculation and discard any outliers (#1861) -func (cs *chainSync) getTarget() uint { - cs.RLock() - defer cs.RUnlock() +func (cs *chainSync) getTarget() (uint, error) { + cs.peerViewLock.RLock() + defer cs.peerViewLock.RUnlock() // in practice, this shouldn't happen, as we only start the module once we have some peer states - if len(cs.peerState) == 0 { - // return max uint32 instead of 0, as returning 0 would switch us to tip mode unexpectedly - return uint(1<<32 - 1) + if len(cs.peerView) == 0 { + return 0, errNoPeerViews } // we are going to sort the data and remove the outliers then we will return the avg of all the valid elements - uintArr := make([]uint, 0, len(cs.peerState)) - for _, ps := range cs.peerState { + uintArr := make([]uint, 0, len(cs.peerView)) + for _, ps := range cs.peerView { uintArr = append(uintArr, ps.number) } sum, count := nonOutliersSumCount(uintArr) quotientBigInt := big.NewInt(0).Div(sum, big.NewInt(int64(count))) - return uint(quotientBigInt.Uint64()) + return uint(quotientBigInt.Uint64()), nil } -// handleWork handles potential new work that may be triggered on receiving a peer's state -// in bootstrap mode, this begins the bootstrap process -// in tip mode, this adds the peer's state to the pendingBlocks set and potentially starts -// a fork sync -func (cs *chainSync) handleWork(ps *peerState) error { - logger.Tracef("handling potential work for target block number %d and hash %s", ps.number, ps.hash) - worker, err := cs.handler.handleNewPeerState(ps) - if err != nil { - return err - } else if worker != nil { - cs.tryDispatchWorker(worker) - } +// handleWorkersResults, every time we submit requests to workers they results should be computed here +// and every cicle we should endup with a complete chain, whenever we identify +// any 
error from a worker we should evaluate the error and re-insert the request +// in the queue and wait for it to completes +// TODO: handle only justification requests +func (cs *chainSync) handleWorkersResults( + workersResults chan *syncTaskResult, startAtBlock uint, expectedSyncedBlocks uint32) error { - return nil -} + startTime := time.Now() + defer func() { + totalSyncAndImportSeconds := time.Since(startTime).Seconds() + bps := float64(expectedSyncedBlocks) / totalSyncAndImportSeconds + logger.Debugf("⛓️ synced %d blocks, "+ + "took: %.2f seconds, bps: %.2f blocks/second", + expectedSyncedBlocks, totalSyncAndImportSeconds, bps) + }() -func (cs *chainSync) tryDispatchWorker(w *worker) { - // if we already have the maximum number of workers, don't dispatch another - if len(cs.workerState.workers) >= maxWorkers { - logger.Trace("reached max workers, ignoring potential work") - return - } + syncingChain := make([]*types.BlockData, expectedSyncedBlocks) + // the total numbers of blocks is missing in the syncing chain + waitingBlocks := expectedSyncedBlocks - // check current worker set for workers already working on these blocks - // if there are none, dispatch new worker - if cs.handler.hasCurrentWorker(w, cs.workerState.workers) { - return - } +taskResultLoop: + for waitingBlocks > 0 { + // in a case where we don't handle workers results we should check the pool + idleDuration := time.Minute + idleTimer := time.NewTimer(idleDuration) - cs.workerState.add(w) - go cs.dispatchWorker(w) -} + select { + case <-cs.stopCh: + return nil -// dispatchWorker begins making requests to the network and attempts to receive responses up until the target -// if it fails due to any reason, it sets the worker `err` and returns -// this function always places the worker into the `resultCh` for result handling upon return -func (cs *chainSync) dispatchWorker(w *worker) { - if w.targetNumber == nil || w.startNumber == nil { - return - } + case <-idleTimer.C: + logger.Warnf("idle ticker 
triggered! checking pool") + cs.workerPool.useConnectedPeers() + continue - logger.Debugf("dispatching sync worker id %d, "+ - "start number %d, target number %d, "+ - "start hash %s, target hash %s, "+ - "request data %d, direction %s", - w.id, - *w.startNumber, *w.targetNumber, - w.startHash, w.targetHash, - w.requestData, w.direction) + case taskResult := <-workersResults: + if !idleTimer.Stop() { + <-idleTimer.C + } - start := time.Now() - defer func() { - end := time.Now() - w.duration = end.Sub(start) - outcome := "success" - if w.err != nil { - outcome = "failure" - } - logger.Debugf( - "sync worker completed in %s with %s for worker id %d", - w.duration, outcome, w.id) - cs.resultQueue <- w - }() + logger.Debugf("task result: peer(%s), with error: %v, with response: %v", + taskResult.who, taskResult.err != nil, taskResult.response != nil) - reqs, err := workerToRequests(w) - if err != nil { - // if we are creating valid workers, this should not happen - logger.Criticalf("failed to create requests from worker id %d: %s", w.id, err) - w.err = &workerError{ - err: err, - } - return - } + if taskResult.err != nil { + logger.Errorf("task result: peer(%s) error: %s", + taskResult.who, taskResult.err) - for _, req := range reqs { - // TODO: if we find a good peer, do sync with them, right now it re-selects a peer each time (#1399) - if err := cs.doSync(req, w.peersTried); err != nil { - // failed to sync, set worker error and put into result queue - w.err = err - return - } - } -} + if !errors.Is(taskResult.err, network.ErrReceivedEmptyMessage) { + if strings.Contains(taskResult.err.Error(), "protocols not supported") { + cs.network.ReportPeer(peerset.ReputationChange{ + Value: peerset.BadProtocolValue, + Reason: peerset.BadProtocolReason, + }, taskResult.who) + } + cs.workerPool.punishPeer(taskResult.who) + } -func (cs *chainSync) doSync(req *network.BlockRequestMessage, peersTried map[peer.ID]struct{}) *workerError { - // determine which peers have the blocks we 
want to request - peers := cs.determineSyncPeers(req, peersTried) + cs.workerPool.submitRequest(taskResult.request, workersResults) + continue + } - if len(peers) == 0 { - return &workerError{ - err: errNoPeers, - } - } + who := taskResult.who + request := taskResult.request + response := taskResult.response - // send out request and potentially receive response, error if timeout - logger.Tracef("sending out block request: %s", req) + if request.Direction == network.Descending { + // reverse blocks before pre-validating and placing in ready queue + reverseBlockData(response.BlockData) + } - // TODO: use scoring to determine what peer to try to sync from first (#1399) - idx, _ := rand.Int(rand.Reader, big.NewInt(int64(len(peers)))) - who := peers[idx.Int64()] + err := validateResponseFields(request.RequestedData, response.BlockData) + if err != nil { + logger.Criticalf("validating fields: %s", err) + // TODO: check the reputation change for nil body in response + // and nil justification in response + if errors.Is(err, errNilHeaderInResponse) { + cs.network.ReportPeer(peerset.ReputationChange{ + Value: peerset.IncompleteHeaderValue, + Reason: peerset.IncompleteHeaderReason, + }, who) + } - resp := new(network.BlockResponseMessage) + cs.workerPool.punishPeer(taskResult.who) + cs.workerPool.submitRequest(taskResult.request, workersResults) + continue taskResultLoop + } - err := cs.blockReqRes.Do(who, req, resp) - if err != nil { - return &workerError{ - err: err, - who: who, - } - } + isChain := isResponseAChain(response.BlockData) + if !isChain { + logger.Criticalf("response from %s is not a chain", who) + cs.workerPool.punishPeer(taskResult.who) + cs.workerPool.submitRequest(taskResult.request, workersResults) + continue taskResultLoop + } + + for _, blockInResponse := range response.BlockData { + if slices.Contains(cs.badBlocks, blockInResponse.Hash.String()) { + logger.Criticalf("%s sent a known bad block: %s (#%d)", + who, blockInResponse.Hash.String(), 
blockInResponse.Number()) + + cs.network.ReportPeer(peerset.ReputationChange{ + Value: peerset.BadBlockAnnouncementValue, + Reason: peerset.BadBlockAnnouncementReason, + }, who) + + cs.workerPool.ignorePeerAsWorker(taskResult.who) + cs.workerPool.submitRequest(taskResult.request, workersResults) + continue taskResultLoop + } + + blockExactIndex := blockInResponse.Header.Number - startAtBlock + syncingChain[blockExactIndex] = blockInResponse + } + + // we need to check if we've filled all positions + // otherwise we should wait for more responses + waitingBlocks -= uint32(len(response.BlockData)) + + // we received a response without the desired amount of blocks + // we should include a new request to retrieve the missing blocks + if len(response.BlockData) < int(*request.Max) { + difference := uint32(int(*request.Max) - len(response.BlockData)) + lastItem := response.BlockData[len(response.BlockData)-1] + + startRequestNumber := uint32(lastItem.Header.Number + 1) + startAt, err := variadic.NewUint32OrHash(startRequestNumber) + if err != nil { + panic(err) + } - if req.Direction == network.Descending { - // reverse blocks before pre-validating and placing in ready queue - reverseBlockData(resp.BlockData) + taskResult.request = &network.BlockRequestMessage{ + RequestedData: network.BootstrapRequestData, + StartingBlock: *startAt, + Direction: network.Ascending, + Max: &difference, + } + cs.workerPool.submitRequest(taskResult.request, workersResults) + continue taskResultLoop + } + } } - // perform some pre-validation of response, error if failure - if err := cs.validateResponse(req, resp, who); err != nil { - return &workerError{ - err: err, - who: who, + if len(syncingChain) >= 2 { + // ensure the acquired block set forms an actual chain + parentElement := syncingChain[0] + for _, element := range syncingChain[1:] { + if parentElement.Header.Hash() != element.Header.ParentHash { + panic(fmt.Sprintf("expected %s (#%d) be parent of %s (#%d)", + 
parentElement.Header.Hash(), parentElement.Header.Number, + element.Header.Hash(), element.Header.Number)) + } + parentElement = element } } - logger.Trace("success! placing block response data in ready queue") + retreiveBlocksSeconds := time.Since(startTime).Seconds() + logger.Debugf("🔽 retrieved %d blocks, took: %.2f seconds, starting process...", + expectedSyncedBlocks, retreiveBlocksSeconds) // response was validated! place into ready block queue - for _, bd := range resp.BlockData { + for _, bd := range syncingChain { // block is ready to be processed! - cs.handleReadyBlock(bd) + if err := cs.handleReadyBlock(bd); err != nil { + return fmt.Errorf("while handling ready block: %w", err) + } } - return nil } -func (cs *chainSync) handleReadyBlock(bd *types.BlockData) { - if cs.readyBlocks.has(bd.Hash) { - logger.Tracef("ignoring block %s in response, already in ready queue", bd.Hash) - return - } - +func (cs *chainSync) handleReadyBlock(bd *types.BlockData) error { // if header was not requested, get it from the pending set // if we're expecting headers, validate should ensure we have a header if bd.Header == nil { @@ -747,328 +767,288 @@ func (cs *chainSync) handleReadyBlock(bd *types.BlockData) { // block wasn't in the pending set! 
// let's check the db as maybe we already processed it has, err := cs.blockState.HasHeader(bd.Hash) - if err != nil && !errors.Is(err, chaindb.ErrKeyNotFound) { + if err != nil && !errors.Is(err, pebble.ErrNotFound) { logger.Debugf("failed to check if header is known for hash %s: %s", bd.Hash, err) - return + return err } if has { logger.Tracef("ignoring block we've already processed, hash=%s", bd.Hash) - return + return err } // this is bad and shouldn't happen logger.Errorf("block with unknown header is ready: hash=%s", bd.Hash) - return + return err + } + + if block.header == nil { + logger.Errorf("new ready block number (unknown) with hash %s", bd.Hash) + return nil } bd.Header = block.header } - if bd.Header == nil { - logger.Errorf("new ready block number (unknown) with hash %s", bd.Hash) - return + err := cs.processBlockData(*bd) + if err != nil { + // depending on the error, we might want to save this block for later + logger.Errorf("block data processing for block with hash %s failed: %s", bd.Hash, err) + return err } - logger.Tracef("new ready block number %d with hash %s", bd.Header.Number, bd.Hash) - - // see if there are any descendents in the pending queue that are now ready to be processed, - // as we have just become aware of their parent block - ready := []*types.BlockData{bd} - ready = cs.pendingBlocks.getReadyDescendants(bd.Hash, ready) - - for _, rb := range ready { - cs.pendingBlocks.removeBlock(rb.Hash) - cs.readyBlocks.push(rb) - } + cs.pendingBlocks.removeBlock(bd.Hash) + return nil } -// determineSyncPeers returns a list of peers that likely have the blocks in the given block request. 
-func (cs *chainSync) determineSyncPeers(req *network.BlockRequestMessage, peersTried map[peer.ID]struct{}) []peer.ID { - var start uint32 - if req.StartingBlock.IsUint32() { - start = req.StartingBlock.Uint32() +// processBlockData processes the BlockData from a BlockResponse and +// returns the index of the last BlockData it handled on success, +// or the index of the block data that errored on failure. +func (cs *chainSync) processBlockData(blockData types.BlockData) error { + headerInState, err := cs.blockState.HasHeader(blockData.Hash) + if err != nil { + return fmt.Errorf("checking if block state has header: %w", err) } - cs.RLock() - defer cs.RUnlock() - - // if we're currently ignoring all our peers, clear out the list. - if len(cs.peerState) == len(cs.ignorePeers) { - cs.RUnlock() - cs.Lock() - for p := range cs.ignorePeers { - delete(cs.ignorePeers, p) - } - cs.Unlock() - cs.RLock() + bodyInState, err := cs.blockState.HasBlockBody(blockData.Hash) + if err != nil { + return fmt.Errorf("checking if block state has body: %w", err) } - peers := make([]peer.ID, 0, len(cs.peerState)) - - for p, state := range cs.peerState { - if _, has := cs.ignorePeers[p]; has { - continue - } - - if _, has := peersTried[p]; has { - continue - } - - // if peer definitely doesn't have any blocks we want in the request, - // don't request from them - if start > 0 && uint32(state.number) < start { - continue + // while in bootstrap mode we don't need to broadcast block announcements + announceImportedBlock := cs.getSyncMode() == tip + if headerInState && bodyInState { + err = cs.processBlockDataWithStateHeaderAndBody(blockData, announceImportedBlock) + if err != nil { + return fmt.Errorf("processing block data with header and "+ + "body in block state: %w", err) } - - peers = append(peers, p) + return nil } - return peers -} - -// validateResponse performs pre-validation of a block response before placing it into either the -// pendingBlocks or readyBlocks set. 
-// It checks the following: -// - the response is not empty -// - the response contains all the expected fields -// - the block is not contained in the bad block list -// - each block has the correct parent, ie. the response constitutes a valid chain -func (cs *chainSync) validateResponse(req *network.BlockRequestMessage, - resp *network.BlockResponseMessage, p peer.ID) error { - if resp == nil || len(resp.BlockData) == 0 { - return errEmptyBlockData - } - - logger.Tracef("validating block response starting at block hash %s", resp.BlockData[0].Hash) - - var ( - prev, curr *types.Header - err error - ) - headerRequested := (req.RequestedData & network.RequestedDataHeader) == 1 - - for i, bd := range resp.BlockData { - if err = cs.validateBlockData(req, bd, p); err != nil { - return err - } - - if headerRequested { - curr = bd.Header - } else { - // if this is a justification-only request, make sure we have the block for the justification - if err = cs.validateJustification(bd); err != nil { - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.BadJustificationValue, - Reason: peerset.BadJustificationReason, - }, p) - return err - } - continue - } - - // check that parent of first block in response is known (either in our db or in the ready queue) - if i == 0 { - prev = curr - - // check that we know the parent of the first block (or it's in the ready queue) - has, _ := cs.blockState.HasHeader(curr.ParentHash) - if has { - continue - } - - if cs.readyBlocks.has(curr.ParentHash) { - continue - } - - // parent unknown, add to pending blocks - if err := cs.pendingBlocks.addBlock(&types.Block{ - Header: *curr, - Body: *bd.Body, - }); err != nil { - return err - } - - if bd.Justification != nil { - if err := cs.pendingBlocks.addJustification(bd.Hash, *bd.Justification); err != nil { - return err - } + if blockData.Header != nil { + if blockData.Body != nil { + err = cs.processBlockDataWithHeaderAndBody(blockData, announceImportedBlock) + if err != nil { + 
return fmt.Errorf("processing block data with header and body: %w", err) } - - return errUnknownParent } - // otherwise, check that this response forms a chain - // ie. curr's parent hash is hash of previous header, and curr's number is previous number + 1 - if prev.Hash() != curr.ParentHash || curr.Number != prev.Number+1 { - // the response is missing some blocks, place blocks from curr onwards into pending blocks set - for _, bd := range resp.BlockData[i:] { - if err := cs.pendingBlocks.addBlock(&types.Block{ - Header: *curr, - Body: *bd.Body, - }); err != nil { - return err - } - - if bd.Justification != nil { - if err := cs.pendingBlocks.addJustification(bd.Hash, *bd.Justification); err != nil { - return err - } - } + if blockData.Justification != nil && len(*blockData.Justification) > 0 { + logger.Infof("handling justification for block %s (#%d)", blockData.Hash.Short(), blockData.Number()) + err = cs.handleJustification(blockData.Header, *blockData.Justification) + if err != nil { + return fmt.Errorf("handling justification: %w", err) } - return errResponseIsNotChain } + } - prev = curr + err = cs.blockState.CompareAndSetBlockData(&blockData) + if err != nil { + return fmt.Errorf("comparing and setting block data: %w", err) } return nil } -// validateBlockData checks that the expected fields are in the block data -func (cs *chainSync) validateBlockData(req *network.BlockRequestMessage, bd *types.BlockData, p peer.ID) error { - if bd == nil { - return errNilBlockData +func (cs *chainSync) processBlockDataWithStateHeaderAndBody(blockData types.BlockData, + announceImportedBlock bool) (err error) { + // TODO: fix this; sometimes when the node shuts down the "best block" isn't stored properly, + // so when the node restarts it has blocks higher than what it thinks is the best, causing it not to sync + // if we update the node to only store finalised blocks in the database, this should be fixed and the entire + // code block can be removed (#1784) + block, err := 
cs.blockState.GetBlockByHash(blockData.Hash) + if err != nil { + return fmt.Errorf("getting block by hash: %w", err) } - requestedData := req.RequestedData + err = cs.blockState.AddBlockToBlockTree(block) + if errors.Is(err, blocktree.ErrBlockExists) { + logger.Debugf( + "block number %d with hash %s already exists in block tree, skipping it.", + block.Header.Number, blockData.Hash) + return nil + } else if err != nil { + return fmt.Errorf("adding block to blocktree: %w", err) + } - if slices.Contains(cs.badBlocks, bd.Hash.String()) { - logger.Errorf("Rejecting known bad block Number: %d Hash: %s", bd.Number(), bd.Hash) - return errBadBlock + if blockData.Justification != nil && len(*blockData.Justification) > 0 { + err = cs.handleJustification(&block.Header, *blockData.Justification) + if err != nil { + return fmt.Errorf("handling justification: %w", err) + } } - if (requestedData&network.RequestedDataHeader) == 1 && bd.Header == nil { - cs.network.ReportPeer(peerset.ReputationChange{ - Value: peerset.IncompleteHeaderValue, - Reason: peerset.IncompleteHeaderReason, - }, p) - return errNilHeaderInResponse + // TODO: this is probably unnecessary, since the state is already in the database + // however, this case shouldn't be hit often, since it's only hit if the node state + // is rewinded or if the node shuts down unexpectedly (#1784) + state, err := cs.storageState.TrieState(&block.Header.StateRoot) + if err != nil { + return fmt.Errorf("loading trie state: %w", err) } - if (requestedData&network.RequestedDataBody>>1) == 1 && bd.Body == nil { - return fmt.Errorf("%w: hash=%s", errNilBodyInResponse, bd.Hash) + err = cs.blockImportHandler.HandleBlockImport(block, state, announceImportedBlock) + if err != nil { + return fmt.Errorf("handling block import: %w", err) } return nil } -func (cs *chainSync) validateJustification(bd *types.BlockData) error { - if bd == nil { - return errNilBlockData +func (cs *chainSync) processBlockDataWithHeaderAndBody(blockData 
types.BlockData, + announceImportedBlock bool) (err error) { + err = cs.babeVerifier.VerifyBlock(blockData.Header) + if err != nil { + return fmt.Errorf("babe verifying block: %w", err) } - // this is ok, since the remote peer doesn't need to provide the info we request from them - // especially with justifications, it's common that they don't have them. - if bd.Justification == nil { - return nil + cs.handleBody(blockData.Body) + + block := &types.Block{ + Header: *blockData.Header, + Body: *blockData.Body, } - has, _ := cs.blockState.HasHeader(bd.Hash) - if !has { - return errUnknownBlockForJustification + err = cs.handleBlock(block, announceImportedBlock) + if err != nil { + return fmt.Errorf("handling block: %w", err) } return nil } -func (cs *chainSync) getHighestBlock() (highestBlock uint, err error) { - cs.RLock() - defer cs.RUnlock() +// handleHeader handles block bodies included in BlockResponses +func (cs *chainSync) handleBody(body *types.Body) { + acc := 0 + for _, ext := range *body { + acc += len(ext) + cs.transactionState.RemoveExtrinsic(ext) + } - if len(cs.peerState) == 0 { - return 0, errNoPeers + blockSizeGauge.Set(float64(acc)) +} + +func (cs *chainSync) handleJustification(header *types.Header, justification []byte) (err error) { + logger.Debugf("handling justification for block %d...", header.Number) + + headerHash := header.Hash() + err = cs.finalityGadget.VerifyBlockJustification(headerHash, justification) + if err != nil { + return fmt.Errorf("verifying block number %d justification: %w", header.Number, err) } - for _, ps := range cs.peerState { - if ps.number < highestBlock { - continue - } - highestBlock = ps.number + err = cs.blockState.SetJustification(headerHash, justification) + if err != nil { + return fmt.Errorf("setting justification for block number %d: %w", header.Number, err) } - return highestBlock, nil + logger.Infof("🔨 finalised block number %d with hash %s", header.Number, headerHash) + return nil } -func workerToRequests(w 
*worker) ([]*network.BlockRequestMessage, error) { - diff := int(*w.targetNumber) - int(*w.startNumber) - if diff < 0 && w.direction != network.Descending { - return nil, errInvalidDirection +// handleHeader handles blocks (header+body) included in BlockResponses +func (cs *chainSync) handleBlock(block *types.Block, announceImportedBlock bool) error { + parent, err := cs.blockState.GetHeader(block.Header.ParentHash) + if err != nil { + return fmt.Errorf("%w: %s", errFailedToGetParent, err) } - if diff > 0 && w.direction != network.Ascending { - return nil, errInvalidDirection + cs.storageState.Lock() + defer cs.storageState.Unlock() + + ts, err := cs.storageState.TrieState(&parent.StateRoot) + if err != nil { + return err } - // start and end block are the same, just request 1 block - if diff == 0 { - diff = 1 + root := ts.MustRoot() + if !bytes.Equal(parent.StateRoot[:], root[:]) { + panic("parent state root does not match snapshot state root") } - // to deal with descending requests (ie. 
target may be lower than start) which are used in tip mode, - // take absolute value of difference between start and target - numBlocks := diff - if numBlocks < 0 { - numBlocks = -numBlocks + rt, err := cs.blockState.GetRuntime(parent.Hash()) + if err != nil { + return err } - numRequests := uint(numBlocks) / maxResponseSize - if numBlocks%maxResponseSize != 0 { - numRequests++ + rt.SetContextStorage(ts) + + _, err = rt.ExecuteBlock(block) + if err != nil { + return fmt.Errorf("failed to execute block %d: %w", block.Header.Number, err) } - startNumber := *w.startNumber - reqs := make([]*network.BlockRequestMessage, numRequests) + if err = cs.blockImportHandler.HandleBlockImport(block, ts, announceImportedBlock); err != nil { + return err + } - for i := uint(0); i < numRequests; i++ { - // check if we want to specify a size - max := uint32(maxResponseSize) + blockHash := block.Header.Hash() + cs.telemetry.SendMessage(telemetry.NewBlockImport( + &blockHash, + block.Header.Number, + "NetworkInitialSync")) - if w.direction == network.Descending && i == numRequests-1 { - size := numBlocks % maxResponseSize - if size == 0 { - size = maxResponseSize - } - max = uint32(size) + return nil +} + +// validateResponseFields checks that the expected fields are in the block data +func validateResponseFields(requestedData byte, blocks []*types.BlockData) error { + for _, bd := range blocks { + if bd == nil { + return errNilBlockData } - var start *variadic.Uint32OrHash - if w.startHash.IsEmpty() { - // worker startHash is unspecified if we are in bootstrap mode - start = variadic.MustNewUint32OrHash(uint32(startNumber)) - } else { - // in tip-syncing mode, we know the hash of the block on the fork we wish to sync - start = variadic.MustNewUint32OrHash(w.startHash) + if (requestedData&network.RequestedDataHeader) == network.RequestedDataHeader && bd.Header == nil { + return fmt.Errorf("%w: %s", errNilHeaderInResponse, bd.Hash) + } - // if we're doing descending requests and not at 
the last (highest starting) request, - // then use number as start block - if w.direction == network.Descending && i != numRequests-1 { - start = variadic.MustNewUint32OrHash(startNumber) - } + if (requestedData&network.RequestedDataBody) == network.RequestedDataBody && bd.Body == nil { + return fmt.Errorf("%w: %s", errNilBodyInResponse, bd.Hash) } - reqs[i] = &network.BlockRequestMessage{ - RequestedData: w.requestData, - StartingBlock: *start, - Direction: w.direction, - Max: &max, + // if we requested strictly justification + if (requestedData|network.RequestedDataJustification) == network.RequestedDataJustification && + bd.Justification == nil { + return fmt.Errorf("%w: %s", errNilJustificationInResponse, bd.Hash) } + } - switch w.direction { - case network.Ascending: - startNumber += maxResponseSize - case network.Descending: - startNumber -= maxResponseSize + return nil +} + +func isResponseAChain(responseBlockData []*types.BlockData) bool { + if len(responseBlockData) < 2 { + return true + } + + previousBlockData := responseBlockData[0] + for _, currBlockData := range responseBlockData[1:] { + previousHash := previousBlockData.Header.Hash() + isParent := previousHash == currBlockData.Header.ParentHash + if !isParent { + return false } + + previousBlockData = currBlockData } - // if our direction is descending, we want to send out the request with the lowest - // startNumber first - if w.direction == network.Descending { - for i, j := 0, len(reqs)-1; i < j; i, j = i+1, j-1 { - reqs[i], reqs[j] = reqs[j], reqs[i] + return true +} + +func (cs *chainSync) getHighestBlock() (highestBlock uint, err error) { + cs.peerViewLock.RLock() + defer cs.peerViewLock.RUnlock() + + if len(cs.peerView) == 0 { + return 0, errNoPeers + } + + for _, ps := range cs.peerView { + if ps.number < highestBlock { + continue } + highestBlock = ps.number } - return reqs, nil + return highestBlock, nil } diff --git a/dot/sync/chain_sync_integration_test.go 
b/dot/sync/chain_sync_integration_test.go deleted file mode 100644 index 375059cb34..0000000000 --- a/dot/sync/chain_sync_integration_test.go +++ /dev/null @@ -1,88 +0,0 @@ -//go:build integration - -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "errors" - "testing" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/peerset" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/golang/mock/gomock" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/require" -) - -func TestValidateBlockData(t *testing.T) { - ctrl := gomock.NewController(t) - cs := newTestChainSync(ctrl) - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ - Value: -1048576, - Reason: "Incomplete header", - }, peer.ID("")) - cs.network = mockNetwork - - req := &network.BlockRequestMessage{ - RequestedData: bootstrapRequestData, - } - - err := cs.validateBlockData(req, nil, "") - require.Equal(t, errNilBlockData, err) - - err = cs.validateBlockData(req, &types.BlockData{}, "") - require.Equal(t, errNilHeaderInResponse, err) - - err = cs.validateBlockData(req, &types.BlockData{ - Header: &types.Header{}, - }, "") - require.ErrorIs(t, err, errNilBodyInResponse) - - err = cs.validateBlockData(req, &types.BlockData{ - Header: &types.Header{}, - Body: &types.Body{}, - }, "") - require.NoError(t, err) -} - -func TestChainSync_validateResponse_firstBlock_Integration(t *testing.T) { - ctrl := gomock.NewController(t) - cs := newTestChainSync(ctrl) - bs := NewMockBlockState(ctrl) - bs.EXPECT().HasHeader(gomock.AssignableToTypeOf(common.Hash{})).Return(false, nil) - cs.blockState = bs - - req := &network.BlockRequestMessage{ - RequestedData: bootstrapRequestData, - } - - header := 
&types.Header{ - Number: 2, - } - - resp := &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: header.Hash(), - Header: &types.Header{ - Number: 2, - }, - Body: &types.Body{}, - Justification: &[]byte{0}, - }, - }, - } - - err := cs.validateResponse(req, resp, "") - require.True(t, errors.Is(err, errUnknownParent)) - require.True(t, cs.pendingBlocks.(*disjointBlockSet).hasBlock(header.Hash())) - bd := cs.pendingBlocks.getBlock(header.Hash()) - require.NotNil(t, bd.header) - require.NotNil(t, bd.body) - require.NotNil(t, bd.justification) -} diff --git a/dot/sync/chain_sync_test.go b/dot/sync/chain_sync_test.go index a53aa8ba4c..7943d26133 100644 --- a/dot/sync/chain_sync_test.go +++ b/dot/sync/chain_sync_test.go @@ -4,26 +4,27 @@ package sync import ( - "context" "errors" + "fmt" + "sync/atomic" "testing" "time" "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/dot/peerset" + "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/blocktree" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/common/variadic" + "github.com/ChainSafe/gossamer/lib/runtime/storage" "github.com/ChainSafe/gossamer/lib/trie" + "github.com/ChainSafe/gossamer/pkg/scale" "github.com/golang/mock/gomock" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -const defaultSlotDuration = 6 * time.Second - func Test_chainSyncState_String(t *testing.T) { t.Parallel() @@ -58,1604 +59,1514 @@ func Test_chainSyncState_String(t *testing.T) { } } -func Test_chainSync_setPeerHead(t *testing.T) { +func Test_chainSync_onImportBlock(t *testing.T) { t.Parallel() + 
const somePeer = peer.ID("abc") errTest := errors.New("test error") - const somePeer = peer.ID("abc") - someHash := common.Hash{1, 2, 3, 4} + emptyTrieState := storage.NewTrieState(nil) + block1AnnounceHeader := types.NewHeader(common.Hash{}, emptyTrieState.MustRoot(), + common.Hash{}, 1, scale.VaryingDataTypeSlice{}) + block2AnnounceHeader := types.NewHeader(block1AnnounceHeader.Hash(), emptyTrieState.MustRoot(), + common.Hash{}, 2, scale.VaryingDataTypeSlice{}) testCases := map[string]struct { - chainSyncBuilder func(ctrl *gomock.Controller) *chainSync - peerID peer.ID - hash common.Hash - number uint - errWrapped error - errMessage string - expectedPeerIDToPeerState map[peer.ID]*peerState - expectedQueuedPeerStates []*peerState + listenForRequests bool + chainSyncBuilder func(ctrl *gomock.Controller) *chainSync + peerID peer.ID + blockAnnounceHeader *types.Header + errWrapped error + errMessage string }{ - "best_block_header_error": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - blockState.EXPECT().BestBlockHeader().Return(nil, errTest) - return &chainSync{ - peerState: map[peer.ID]*peerState{}, - blockState: blockState, - } - }, - peerID: somePeer, - hash: someHash, - number: 1, - errWrapped: errTest, - errMessage: "best block header: test error", - expectedPeerIDToPeerState: map[peer.ID]*peerState{ - somePeer: { - who: somePeer, - hash: someHash, - number: 1, - }, - }, - }, - "number_smaller_than_best_block_number_get_hash_by_number_error": { + "announced_block_already_exists_in_disjoint_set": { chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 2} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(1)). 
- Return(common.Hash{}, errTest) + pendingBlocks := NewMockDisjointBlockSet(ctrl) + pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(true) return &chainSync{ - peerState: map[peer.ID]*peerState{}, - blockState: blockState, + pendingBlocks: pendingBlocks, } }, - peerID: somePeer, - hash: someHash, - number: 1, - errWrapped: errTest, - errMessage: "get block hash by number: test error", - expectedPeerIDToPeerState: map[peer.ID]*peerState{ - somePeer: { - who: somePeer, - hash: someHash, - number: 1, - }, - }, + peerID: somePeer, + blockAnnounceHeader: block2AnnounceHeader, + errWrapped: errAlreadyInDisjointSet, + errMessage: fmt.Sprintf("already in disjoint set: block %s (#%d)", + block2AnnounceHeader.Hash(), block2AnnounceHeader.Number), }, - "number_smaller_than_best_block_number_and_same_hash": { + "failed_to_add_announced_block_in_disjoint_set": { chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 2} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(1)).Return(someHash, nil) + pendingBlocks := NewMockDisjointBlockSet(ctrl) + pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) + pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(errTest) + return &chainSync{ - peerState: map[peer.ID]*peerState{}, - blockState: blockState, + pendingBlocks: pendingBlocks, } }, - peerID: somePeer, - hash: someHash, - number: 1, - expectedPeerIDToPeerState: map[peer.ID]*peerState{ - somePeer: { - who: somePeer, - hash: someHash, - number: 1, - }, - }, + peerID: somePeer, + blockAnnounceHeader: block2AnnounceHeader, + errWrapped: errTest, + errMessage: "while adding pending block header: test error", }, - "number_smaller_than_best_block_number_get_highest_finalised_header_error": { + "announced_block_while_in_bootstrap_mode": { chainSyncBuilder: func(ctrl *gomock.Controller) 
*chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 2} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(1)). - Return(common.Hash{2}, nil) // other hash than someHash - blockState.EXPECT().GetHighestFinalisedHeader().Return(nil, errTest) + pendingBlocks := NewMockDisjointBlockSet(ctrl) + pendingBlocks.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) + pendingBlocks.EXPECT().addHeader(block2AnnounceHeader).Return(nil) + + state := atomic.Value{} + state.Store(bootstrap) + return &chainSync{ - peerState: map[peer.ID]*peerState{}, - blockState: blockState, + pendingBlocks: pendingBlocks, + syncMode: state, } }, - peerID: somePeer, - hash: someHash, - number: 1, - errWrapped: errTest, - errMessage: "get highest finalised header: test error", - expectedPeerIDToPeerState: map[peer.ID]*peerState{ - somePeer: { - who: somePeer, - hash: someHash, - number: 1, - }, - }, + peerID: somePeer, + blockAnnounceHeader: block2AnnounceHeader, }, - "number_smaller_than_best_block_number_and_finalised_number_equal_than_number": { + "announced_block_while_in_tip_mode": { chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 2} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(1)). 
- Return(common.Hash{2}, nil) // other hash than someHash - finalisedBlockHeader := &types.Header{Number: 1} - blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) - network := NewMockNetwork(ctrl) - network.EXPECT().ReportPeer(peerset.ReputationChange{ - Value: peerset.BadBlockAnnouncementValue, - Reason: peerset.BadBlockAnnouncementReason, - }, somePeer) - return &chainSync{ - peerState: map[peer.ID]*peerState{}, - blockState: blockState, - network: network, + pendingBlocksMock := NewMockDisjointBlockSet(ctrl) + pendingBlocksMock.EXPECT().hasBlock(block2AnnounceHeader.Hash()).Return(false) + pendingBlocksMock.EXPECT().addHeader(block2AnnounceHeader).Return(nil) + pendingBlocksMock.EXPECT().removeBlock(block2AnnounceHeader.Hash()) + pendingBlocksMock.EXPECT().size().Return(int(0)) + + blockStateMock := NewMockBlockState(ctrl) + blockStateMock.EXPECT(). + HasHeader(block2AnnounceHeader.Hash()). + Return(false, nil) + + blockStateMock.EXPECT(). + BestBlockHeader(). + Return(block1AnnounceHeader, nil) + + blockStateMock.EXPECT(). + GetHighestFinalisedHeader(). 
+ Return(block2AnnounceHeader, nil) + + expectedRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(block2AnnounceHeader.Hash()), + 1, network.BootstrapRequestData, network.Descending) + + fakeBlockBody := types.Body([]types.Extrinsic{}) + mockedBlockResponse := &network.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Hash: block2AnnounceHeader.Hash(), + Header: block2AnnounceHeader, + Body: &fakeBlockBody, + }, + }, } - }, - peerID: somePeer, - hash: someHash, - number: 1, - errWrapped: errPeerOnInvalidFork, - errMessage: "peer is on an invalid fork: for peer ZiCa and block number 1", - expectedPeerIDToPeerState: map[peer.ID]*peerState{ - somePeer: { - who: somePeer, - hash: someHash, - number: 1, - }, - }, - }, - "number_smaller_than_best_block_number_and_finalised_number_bigger_than_number": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 2} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(1)). - Return(common.Hash{2}, nil) // other hash than someHash - finalisedBlockHeader := &types.Header{Number: 2} - blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) - network := NewMockNetwork(ctrl) - network.EXPECT().ReportPeer(peerset.ReputationChange{ - Value: peerset.BadBlockAnnouncementValue, - Reason: peerset.BadBlockAnnouncementReason, - }, somePeer) + + networkMock := NewMockNetwork(ctrl) + requestMaker := NewMockRequestMaker(ctrl) + requestMaker.EXPECT(). + Do(somePeer, expectedRequest, &network.BlockResponseMessage{}). 
+ DoAndReturn(func(_, _, response any) any { + responsePtr := response.(*network.BlockResponseMessage) + *responsePtr = *mockedBlockResponse + return nil + }) + + babeVerifierMock := NewMockBabeVerifier(ctrl) + storageStateMock := NewMockStorageState(ctrl) + importHandlerMock := NewMockBlockImportHandler(ctrl) + telemetryMock := NewMockTelemetry(ctrl) + + const announceBlock = true + ensureSuccessfulBlockImportFlow(t, block1AnnounceHeader, mockedBlockResponse.BlockData, + blockStateMock, babeVerifierMock, storageStateMock, importHandlerMock, telemetryMock, + announceBlock) + + workerPool := newSyncWorkerPool(networkMock, requestMaker) + // include the peer who announced the block in the pool + workerPool.newPeer(somePeer) + + state := atomic.Value{} + state.Store(tip) + return &chainSync{ - peerState: map[peer.ID]*peerState{}, - blockState: blockState, - network: network, + pendingBlocks: pendingBlocksMock, + syncMode: state, + workerPool: workerPool, + network: networkMock, + blockState: blockStateMock, + babeVerifier: babeVerifierMock, + telemetry: telemetryMock, + storageState: storageStateMock, + blockImportHandler: importHandlerMock, } }, - peerID: somePeer, - hash: someHash, - number: 1, - errWrapped: errPeerOnInvalidFork, - errMessage: "peer is on an invalid fork: for peer ZiCa and block number 1", - expectedPeerIDToPeerState: map[peer.ID]*peerState{ - somePeer: { - who: somePeer, - hash: someHash, - number: 1, - }, - }, + listenForRequests: true, + peerID: somePeer, + blockAnnounceHeader: block2AnnounceHeader, }, - "number smaller than best block number and " + - "finalised number smaller than number and " + - "has_header_error": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 3} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(2)). 
- Return(common.Hash{2}, nil) // other hash than someHash - finalisedBlockHeader := &types.Header{Number: 1} - blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) - blockState.EXPECT().HasHeader(someHash).Return(false, errTest) - return &chainSync{ - peerState: map[peer.ID]*peerState{}, - blockState: blockState, - } - }, - peerID: somePeer, - hash: someHash, - number: 2, - errWrapped: errTest, - errMessage: "has header: test error", - expectedPeerIDToPeerState: map[peer.ID]*peerState{ - somePeer: { - who: somePeer, - hash: someHash, - number: 2, - }, - }, + } + + for name, tt := range testCases { + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) + + chainSync := tt.chainSyncBuilder(ctrl) + if tt.listenForRequests { + stopCh := make(chan struct{}) + defer close(stopCh) + go chainSync.workerPool.listenForRequests(stopCh) + } + + err := chainSync.onBlockAnnounce(announcedBlock{ + who: tt.peerID, + header: tt.blockAnnounceHeader, + }) + + assert.ErrorIs(t, err, tt.errWrapped) + if tt.errWrapped != nil { + assert.EqualError(t, err, tt.errMessage) + } + }) + } +} + +func TestChainSync_setPeerHead(t *testing.T) { + const randomHashString = "0x580d77a9136035a0bc3c3cd86286172f7f81291164c5914266073a30466fba21" + randomHash := common.MustHexToHash(randomHashString) + + testcases := map[string]struct { + newChainSync func(t *testing.T, ctrl *gomock.Controller) *chainSync + peerID peer.ID + bestHash common.Hash + bestNumber uint + shouldBeAWorker bool + workerStatus byte + }{ + "set_peer_head_with_new_peer": { + newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { + networkMock := NewMockNetwork(ctrl) + workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) + + cs := newChainSyncTest(t, ctrl) + cs.workerPool = workerPool + return cs + }, + peerID: peer.ID("peer-test"), + bestHash: randomHash, + bestNumber: uint(20), + shouldBeAWorker: true, + workerStatus: available, }, 
- "number smaller than best block number and " + - "finalised number smaller than number and " + - "has_the_hash": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 3} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - blockState.EXPECT().GetHashByNumber(uint(2)). - Return(common.Hash{2}, nil) // other hash than someHash - finalisedBlockHeader := &types.Header{Number: 1} - blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) - blockState.EXPECT().HasHeader(someHash).Return(true, nil) - return &chainSync{ - peerState: map[peer.ID]*peerState{}, - blockState: blockState, + "set_peer_head_with_a_to_ignore_peer_should_not_be_included_in_the_workerpoll": { + newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { + networkMock := NewMockNetwork(ctrl) + workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) + workerPool.ignorePeers = map[peer.ID]struct{}{ + peer.ID("peer-test"): {}, } + + cs := newChainSyncTest(t, ctrl) + cs.workerPool = workerPool + return cs }, - peerID: somePeer, - hash: someHash, - number: 2, - expectedPeerIDToPeerState: map[peer.ID]*peerState{ - somePeer: { - who: somePeer, - hash: someHash, - number: 2, - }, - }, + peerID: peer.ID("peer-test"), + bestHash: randomHash, + bestNumber: uint(20), + shouldBeAWorker: false, }, - "number_bigger_than_the_head_number_add_hash_and_number_error": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 1} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - pendingBlocks := NewMockDisjointBlockSet(ctrl) - pendingBlocks.EXPECT().addHashAndNumber(someHash, uint(2)). 
- Return(errTest) - return &chainSync{ - peerState: map[peer.ID]*peerState{}, - blockState: blockState, - pendingBlocks: pendingBlocks, + "set_peer_head_that_stills_punished_in_the_worker_poll": { + newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { + networkMock := NewMockNetwork(ctrl) + workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) + workerPool.workers = map[peer.ID]*peerSyncWorker{ + peer.ID("peer-test"): { + status: punished, + punishmentTime: time.Now().Add(3 * time.Hour), + }, } + + cs := newChainSyncTest(t, ctrl) + cs.workerPool = workerPool + return cs }, - peerID: somePeer, - hash: someHash, - number: 2, - errWrapped: errTest, - errMessage: "add hash and number: test error", - expectedPeerIDToPeerState: map[peer.ID]*peerState{ - somePeer: { - who: somePeer, - hash: someHash, - number: 2, - }, - }, + peerID: peer.ID("peer-test"), + bestHash: randomHash, + bestNumber: uint(20), + shouldBeAWorker: true, + workerStatus: punished, }, - "number_bigger_than_the_head_number_success": { - chainSyncBuilder: func(ctrl *gomock.Controller) *chainSync { - blockState := NewMockBlockState(ctrl) - bestBlockHeader := &types.Header{Number: 1} - blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) - pendingBlocks := NewMockDisjointBlockSet(ctrl) - pendingBlocks.EXPECT().addHashAndNumber(someHash, uint(2)). - Return(nil) - return &chainSync{ - peerState: map[peer.ID]*peerState{}, - blockState: blockState, - pendingBlocks: pendingBlocks, - // buffered of 1 so setPeerHead can write to it - // without a consumer of the channel on the other end. 
- workQueue: make(chan *peerState, 1), + "set_peer_head_that_punishment_isnot_valid_in_the_worker_poll": { + newChainSync: func(t *testing.T, ctrl *gomock.Controller) *chainSync { + networkMock := NewMockNetwork(ctrl) + workerPool := newSyncWorkerPool(networkMock, NewMockRequestMaker(nil)) + workerPool.workers = map[peer.ID]*peerSyncWorker{ + peer.ID("peer-test"): { + status: punished, + punishmentTime: time.Now().Add(-3 * time.Hour), + }, } + + cs := newChainSyncTest(t, ctrl) + cs.workerPool = workerPool + return cs }, - peerID: somePeer, - hash: someHash, - number: 2, - expectedPeerIDToPeerState: map[peer.ID]*peerState{ - somePeer: { - who: somePeer, - hash: someHash, - number: 2, - }, - }, - expectedQueuedPeerStates: []*peerState{ - { - who: somePeer, - hash: someHash, - number: 2, - }, - }, + peerID: peer.ID("peer-test"), + bestHash: randomHash, + bestNumber: uint(20), + shouldBeAWorker: true, + workerStatus: available, }, } - for name, testCase := range testCases { - testCase := testCase - t.Run(name, func(t *testing.T) { - t.Parallel() + for tname, tt := range testcases { + tt := tt + t.Run(tname, func(t *testing.T) { ctrl := gomock.NewController(t) + cs := tt.newChainSync(t, ctrl) + cs.setPeerHead(tt.peerID, tt.bestHash, tt.bestNumber) + + view, exists := cs.peerView[tt.peerID] + require.True(t, exists) + require.Equal(t, tt.peerID, view.who) + require.Equal(t, tt.bestHash, view.hash) + require.Equal(t, tt.bestNumber, view.number) + + if tt.shouldBeAWorker { + syncWorker, exists := cs.workerPool.workers[tt.peerID] + require.True(t, exists) + require.Equal(t, tt.workerStatus, syncWorker.status) + } else { + _, exists := cs.workerPool.workers[tt.peerID] + require.False(t, exists) + } + }) + } +} + +func newChainSyncTest(t *testing.T, ctrl *gomock.Controller) *chainSync { + t.Helper() + + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + + cfg := chainSyncConfig{ + bs: 
mockBlockState, + pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), + minPeers: 1, + maxPeers: 5, + slotDuration: 6 * time.Second, + } - chainSync := testCase.chainSyncBuilder(ctrl) + return newChainSync(cfg) +} - err := chainSync.setPeerHead(testCase.peerID, testCase.hash, testCase.number) +func setupChainSyncToBootstrapMode(t *testing.T, blocksAhead uint, + bs BlockState, net Network, reqMaker network.RequestMaker, babeVerifier BabeVerifier, + storageState StorageState, blockImportHandler BlockImportHandler, telemetry Telemetry) *chainSync { + t.Helper() + mockedPeerID := []peer.ID{ + peer.ID("some_peer_1"), + peer.ID("some_peer_2"), + peer.ID("some_peer_3"), + } - assert.ErrorIs(t, err, testCase.errWrapped) - if testCase.errWrapped != nil { - assert.EqualError(t, err, testCase.errMessage) - } - assert.Equal(t, testCase.expectedPeerIDToPeerState, chainSync.peerState) + peerViewMap := map[peer.ID]*peerView{} + for _, p := range mockedPeerID { + peerViewMap[p] = &peerView{ + who: p, + hash: common.Hash{1, 2, 3}, + number: blocksAhead, + } + } - require.Equal(t, len(testCase.expectedQueuedPeerStates), len(chainSync.workQueue)) - for _, expectedPeerState := range testCase.expectedQueuedPeerStates { - peerState := <-chainSync.workQueue - assert.Equal(t, expectedPeerState, peerState) - } - }) + cfg := chainSyncConfig{ + pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), + minPeers: 1, + maxPeers: 5, + slotDuration: 6 * time.Second, + bs: bs, + net: net, + requestMaker: reqMaker, + babeVerifier: babeVerifier, + storageState: storageState, + blockImportHandler: blockImportHandler, + telemetry: telemetry, } + + chainSync := newChainSync(cfg) + chainSync.peerView = peerViewMap + chainSync.syncMode.Store(bootstrap) + + return chainSync } -func TestChainSync_sync_bootstrap_withWorkerError(t *testing.T) { +func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorker(t *testing.T) { t.Parallel() - ctrl := gomock.NewController(t) - cs := newTestChainSync(ctrl) - 
mockBlockState := NewMockBlockState(ctrl) - mockHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, trie.EmptyHash, 0, - types.NewDigest()) - mockBlockState.EXPECT().BestBlockHeader().Return(mockHeader, nil).Times(2) - cs.blockState = mockBlockState - cs.handler = newBootstrapSyncer(mockBlockState) - mockNetwork := NewMockNetwork(ctrl) + mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, + trie.EmptyHash, 0, types.NewDigest()) + + const blocksAhead = 129 + totalBlockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, int(blocksAhead)-1) + mockedNetwork := NewMockNetwork(ctrl) + + workerPeerID := peer.ID("noot") startingBlock := variadic.MustNewUint32OrHash(1) max := uint32(128) - mockReqRes := NewMockRequestMaker(ctrl) - mockReqRes.EXPECT().Do(peer.ID("noot"), &network.BlockRequestMessage{ - RequestedData: 19, + mockedRequestMaker := NewMockRequestMaker(ctrl) + + expectedBlockRequestMessage := &network.BlockRequestMessage{ + RequestedData: network.BootstrapRequestData, StartingBlock: *startingBlock, - Direction: 0, + Direction: network.Ascending, Max: &max, - }, &network.BlockResponseMessage{}) - cs.blockReqRes = mockReqRes - cs.network = mockNetwork + } - go cs.sync() - defer cs.cancel() + mockedRequestMaker.EXPECT(). + Do(workerPeerID, expectedBlockRequestMessage, &network.BlockResponseMessage{}). 
+ DoAndReturn(func(_, _, response any) any { + responsePtr := response.(*network.BlockResponseMessage) + *responsePtr = *totalBlockResponse + return nil + }) - testPeer := peer.ID("noot") - cs.peerState[testPeer] = &peerState{ - number: 1000, - } + mockedBlockState := NewMockBlockState(ctrl) + mockedBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockStorageState := NewMockStorageState(ctrl) + mockImportHandler := NewMockBlockImportHandler(ctrl) + mockTelemetry := NewMockTelemetry(ctrl) + const announceBlock = false + // setup mocks for new synced blocks that doesn't exists in our local database + ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, totalBlockResponse.BlockData, mockedBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) + + // setup a chain sync which holds in its peer view map + // 3 peers, each one announce block X as its best block number. 
+ // We start this test with genesis block being our best block, so + // we're far behind by X blocks, we should execute a bootstrap + // sync request those blocks + cs := setupChainSyncToBootstrapMode(t, blocksAhead, + mockedBlockState, mockedNetwork, mockedRequestMaker, mockBabeVerifier, + mockStorageState, mockImportHandler, mockTelemetry) + + target, err := cs.getTarget() + require.NoError(t, err) + require.Equal(t, uint(129), target) - cs.workQueue <- cs.peerState[testPeer] + // include a new worker in the worker pool set, this worker + // should be an available peer that will receive a block request + // the worker pool executes the workers management + cs.workerPool.fromBlockAnnounce(peer.ID("noot")) - select { - case res := <-cs.resultQueue: - expected := &workerError{ - err: errEmptyBlockData, // since MockNetwork returns a nil response - who: testPeer, - } - require.Equal(t, expected, res.err) - case <-time.After(5 * time.Second): - t.Fatal("did not get worker response") - } + stopCh := make(chan struct{}) + go cs.workerPool.listenForRequests(stopCh) - require.Equal(t, bootstrap, cs.state) + err = cs.requestMaxBlocksFrom(mockedGenesisHeader) + require.NoError(t, err) + + close(stopCh) + <-cs.workerPool.doneCh } -func TestChainSync_sync_tip(t *testing.T) { +func TestChainSync_BootstrapSync_SuccessfulSync_WithTwoWorkers(t *testing.T) { t.Parallel() - done := make(chan struct{}) - ctrl := gomock.NewController(t) - cs := newTestChainSync(ctrl) - header := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, trie.EmptyHash, 1000, - types.NewDigest()) - - bs := NewMockBlockState(ctrl) - bs.EXPECT().BestBlockHeader().Return(header, nil) - bs.EXPECT().GetHighestFinalisedHeader().DoAndReturn(func() (*types.Header, error) { - close(done) - return header, nil - }) - cs.blockState = bs - - go cs.sync() - defer cs.cancel() - - testPeer := peer.ID("noot") - cs.peerState[testPeer] = &peerState{ - number: 999, + mockBlockState := NewMockBlockState(ctrl) + 
mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, + trie.EmptyHash, 0, types.NewDigest()) + + mockNetwork := NewMockNetwork(ctrl) + mockRequestMaker := NewMockRequestMaker(ctrl) + + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockStorageState := NewMockStorageState(ctrl) + mockImportHandler := NewMockBlockImportHandler(ctrl) + mockTelemetry := NewMockTelemetry(ctrl) + + // this test expects two workers responding each request with 128 blocks which means + // we should import 256 blocks in total + blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256) + + // here we split the whole set in two parts each one will be the "response" for each peer + worker1Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[:128], } + const announceBlock = false + // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow + // will setup the expectations starting from the genesis header until block 128 + ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) + + worker2Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[128:], + } + // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow + // will setup the expectations starting from block 128, from previous worker, until block 256 + parent := worker1Response.BlockData[127] + ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) + + // we use gomock.Any since I cannot guarantee which peer picks which request + // but the first call to DoBlockRequest will return the first set and the second + // call will return the 
second set + mockRequestMaker.EXPECT(). + Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). + DoAndReturn(func(_, _, response any) any { + responsePtr := response.(*network.BlockResponseMessage) + *responsePtr = *worker1Response + return nil + }) + + mockRequestMaker.EXPECT(). + Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). + DoAndReturn(func(_, _, response any) any { + responsePtr := response.(*network.BlockResponseMessage) + *responsePtr = *worker2Response + return nil + }) + + // setup a chain sync which holds in its peer view map + // 3 peers, each one announce block 129 as its best block number. + // We start this test with genesis block being our best block, so + // we're far behind by 128 blocks, we should execute a bootstrap + // sync request those blocks + const blocksAhead = 257 + cs := setupChainSyncToBootstrapMode(t, blocksAhead, + mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, + mockStorageState, mockImportHandler, mockTelemetry) + + target, err := cs.getTarget() + require.NoError(t, err) + require.Equal(t, uint(blocksAhead), target) - cs.workQueue <- cs.peerState[testPeer] - <-done - require.Equal(t, tip, cs.state) + // include a new worker in the worker pool set, this worker + // should be an available peer that will receive a block request + // the worker pool executes the workers management + cs.workerPool.fromBlockAnnounce(peer.ID("noot")) + cs.workerPool.fromBlockAnnounce(peer.ID("noot2")) + + stopCh := make(chan struct{}) + go cs.workerPool.listenForRequests(stopCh) + + err = cs.requestMaxBlocksFrom(mockedGenesisHeader) + require.NoError(t, err) + + close(stopCh) + <-cs.workerPool.doneCh } -func TestChainSync_getTarget(t *testing.T) { +func TestChainSync_BootstrapSync_SuccessfulSync_WithOneWorkerFailing(t *testing.T) { + t.Parallel() + ctrl := gomock.NewController(t) - cs := newTestChainSync(ctrl) - require.Equal(t, uint(1<<32-1), cs.getTarget()) - cs.peerState = map[peer.ID]*peerState{ - "a": 
{ - number: 0, // outlier - }, - "b": { - number: 110, - }, - "c": { - number: 120, - }, - "d": { - number: 130, - }, - "e": { - number: 140, - }, - "f": { - number: 150, - }, - "g": { - number: 1000, // outlier - }, + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, + trie.EmptyHash, 0, types.NewDigest()) + + mockNetwork := NewMockNetwork(ctrl) + mockRequestMaker := NewMockRequestMaker(ctrl) + + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockStorageState := NewMockStorageState(ctrl) + mockImportHandler := NewMockBlockImportHandler(ctrl) + mockTelemetry := NewMockTelemetry(ctrl) + + // this test expects two workers responding each request with 128 blocks which means + // we should import 256 blocks in total + blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256) + const announceBlock = false + + // here we split the whole set in two parts each one will be the "response" for each peer + worker1Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[:128], } - require.Equal(t, uint(130), cs.getTarget()) // sum:650/count:5= avg:130 + // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow + // will setup the expectations starting from the genesis header until block 128 + ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) - cs.peerState = map[peer.ID]*peerState{ - "testA": { - number: 1000, - }, - "testB": { - number: 2000, - }, + worker2Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[128:], } + // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow + // will setup the expectations starting from block 128, from 
previous worker, until block 256 + parent := worker1Response.BlockData[127] + ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) + + // we use gomock.Any since I cannot guarantee which peer picks which request + // but the first call to DoBlockRequest will return the first set and the second + // call will return the second set + doBlockRequestCount := 0 + mockRequestMaker.EXPECT(). + Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). + DoAndReturn(func(peerID, _, response any) any { + // lets ensure that the DoBlockRequest is called by + // peer.ID(alice) and peer.ID(bob). When bob calls, this method will fail + // then alice should pick the failed request and re-execute it which will + // be the third call + responsePtr := response.(*network.BlockResponseMessage) + defer func() { doBlockRequestCount++ }() + + pID := peerID.(peer.ID) // cast to peer ID + switch doBlockRequestCount { + case 0, 1: + if pID == peer.ID("alice") { + *responsePtr = *worker1Response + return nil + } + + if pID == peer.ID("bob") { + return errors.New("a bad error while getting a response") + } + + require.FailNow(t, "expected calls by %s and %s, got: %s", + peer.ID("alice"), peer.ID("bob"), pID) + default: + // ensure the the third call will be made by peer.ID("alice") + require.Equalf(t, pID, peer.ID("alice"), + "expect third call be made by %s, got: %s", peer.ID("alice"), pID) + } + + *responsePtr = *worker2Response + return nil + }).Times(3) + + // setup a chain sync which holds in its peer view map + // 3 peers, each one announce block 129 as its best block number. 
+ // We start this test with genesis block being our best block, so + // we're far behind by 128 blocks, we should execute a bootstrap + // sync request those blocks + const blocksAhead = 257 + cs := setupChainSyncToBootstrapMode(t, blocksAhead, + mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, + mockStorageState, mockImportHandler, mockTelemetry) + + target, err := cs.getTarget() + require.NoError(t, err) + require.Equal(t, uint(blocksAhead), target) + + // include a new worker in the worker pool set, this worker + // should be an available peer that will receive a block request + // the worker pool executes the workers management + cs.workerPool.fromBlockAnnounce(peer.ID("alice")) + cs.workerPool.fromBlockAnnounce(peer.ID("bob")) + + stopCh := make(chan struct{}) + go cs.workerPool.listenForRequests(stopCh) + + err = cs.requestMaxBlocksFrom(mockedGenesisHeader) + require.NoError(t, err) + + close(stopCh) + <-cs.workerPool.doneCh - require.Equal(t, uint(1500), cs.getTarget()) + // peer should be punished + syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] + require.True(t, ok) + require.Equal(t, punished, syncWorker.status) } -func TestWorkerToRequests(t *testing.T) { +func TestChainSync_BootstrapSync_SuccessfulSync_WithProtocolNotSupported(t *testing.T) { t.Parallel() - w := &worker{ - startNumber: uintPtr(10), - targetNumber: uintPtr(1), - direction: network.Ascending, - } - _, err := workerToRequests(w) - require.Equal(t, errInvalidDirection, err) + ctrl := gomock.NewController(t) + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, + trie.EmptyHash, 0, types.NewDigest()) - type testCase struct { - w *worker - expected []*network.BlockRequestMessage - } + mockNetwork := NewMockNetwork(ctrl) + mockRequestMaker := NewMockRequestMaker(ctrl) - var ( - max128 = uint32(128) 
- max9 = uint32(9) - max64 = uint32(64) - ) - - testCases := map[string]testCase{ - "test_0": { - w: &worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(1 + maxResponseSize), - direction: network.Ascending, - requestData: bootstrapRequestData, - }, - expected: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(1), - Direction: network.Ascending, - Max: &max128, - }, - }, - }, - "test_1": { - w: &worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(1 + (maxResponseSize * 2)), - direction: network.Ascending, - requestData: bootstrapRequestData, - }, - expected: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(1), - Direction: network.Ascending, - Max: &max128, - }, - { - RequestedData: network.RequestedDataHeader + network.RequestedDataBody + network.RequestedDataJustification, - StartingBlock: *variadic.MustNewUint32OrHash(1 + maxResponseSize), - Direction: network.Ascending, - Max: &max128, - }, - }, - }, - "test_2": { - w: &worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(10), - direction: network.Ascending, - requestData: bootstrapRequestData, - }, - expected: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(1), - Direction: network.Ascending, - Max: &max128, - }, - }, - }, - "test_3": { - w: &worker{ - startNumber: uintPtr(10), - targetNumber: uintPtr(1), - direction: network.Descending, - requestData: bootstrapRequestData, - }, - expected: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(10), - Direction: network.Descending, - Max: &max9, - }, - }, - }, - "test_4": { - w: &worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(1 + maxResponseSize + (maxResponseSize / 2)), - direction: network.Ascending, - requestData: bootstrapRequestData, - }, - 
expected: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(1), - Direction: network.Ascending, - Max: &max128, - }, - { - RequestedData: network.RequestedDataHeader + network.RequestedDataBody + network.RequestedDataJustification, - StartingBlock: *variadic.MustNewUint32OrHash(1 + maxResponseSize), - Direction: network.Ascending, - Max: &max128, - }, - }, - }, - "test_5": { - w: &worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(10), - targetHash: common.Hash{0xa}, - direction: network.Ascending, - requestData: bootstrapRequestData, - }, - expected: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(1), - Direction: network.Ascending, - Max: &max128, - }, - }, - }, - "test_6": { - w: &worker{ - startNumber: uintPtr(1), - startHash: common.Hash{0xb}, - targetNumber: uintPtr(10), - targetHash: common.Hash{0xc}, - direction: network.Ascending, - requestData: bootstrapRequestData, - }, - expected: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(common.Hash{0xb}), - Direction: network.Ascending, - Max: &max128, - }, - }, - }, - "test_7": { - w: &worker{ - startNumber: uintPtr(10), - targetNumber: uintPtr(10), - direction: network.Ascending, - requestData: bootstrapRequestData, - }, - expected: []*network.BlockRequestMessage{ - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(10), - Direction: network.Ascending, - Max: &max128, - }, - }, - }, - "test_8": { - w: &worker{ - startNumber: uintPtr(1 + maxResponseSize + (maxResponseSize / 2)), - targetNumber: uintPtr(1), - direction: network.Descending, - requestData: bootstrapRequestData, - }, - expected: []*network.BlockRequestMessage{ - { - RequestedData: network.RequestedDataHeader + network.RequestedDataBody + network.RequestedDataJustification, - 
StartingBlock: *variadic.MustNewUint32OrHash(1 + (maxResponseSize / 2)), - Direction: network.Descending, - Max: &max64, - }, - { - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(1 + maxResponseSize + (maxResponseSize / 2)), - Direction: network.Descending, - Max: &max128, - }, - }, - }, + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockStorageState := NewMockStorageState(ctrl) + mockImportHandler := NewMockBlockImportHandler(ctrl) + mockTelemetry := NewMockTelemetry(ctrl) + + // this test expects two workers responding each request with 128 blocks which means + // we should import 256 blocks in total + blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256) + const announceBlock = false + + // here we split the whole set in two parts each one will be the "response" for each peer + worker1Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[:128], } - for name, tc := range testCases { - tc := tc - t.Run(name, func(t *testing.T) { - t.Parallel() - reqs, err := workerToRequests(tc.w) - require.NoError(t, err) - require.Equal(t, tc.expected, reqs) - }) + // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow + // will setup the expectations starting from the genesis header until block 128 + ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) + + worker2Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[128:], } + // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow + // will setup the expectations starting from block 128, from previous worker, until block 256 + parent := worker1Response.BlockData[127] + ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, 
mockTelemetry, announceBlock)
+
+	// we use gomock.Any since I cannot guarantee which peer picks which request
+	// but the first call to DoBlockRequest will return the first set and the second
+	// call will return the second set
+	doBlockRequestCount := 0
+	mockRequestMaker.EXPECT().
+		Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}).
+		DoAndReturn(func(peerID, _, response any) any {
+			// let's ensure that the DoBlockRequest is called by
+			// peer.ID(alice) and peer.ID(bob). When bob calls, this method will fail
+			// then alice should pick the failed request and re-execute it which will
+			// be the third call
+			responsePtr := response.(*network.BlockResponseMessage)
+			defer func() { doBlockRequestCount++ }()
+
+			pID := peerID.(peer.ID) // cast to peer ID
+			switch doBlockRequestCount {
+			case 0, 1:
+				if pID == peer.ID("alice") {
+					*responsePtr = *worker1Response
+					return nil
+				}
+
+				if pID == peer.ID("bob") {
+					return errors.New("protocols not supported")
+				}
+
+				require.FailNow(t, "expected calls by %s and %s, got: %s",
+					peer.ID("alice"), peer.ID("bob"), pID)
+			default:
+				// ensure that the third call will be made by peer.ID("alice")
+				require.Equalf(t, pID, peer.ID("alice"),
+					"expect third call be made by %s, got: %s", peer.ID("alice"), pID)
+			}
+
+			*responsePtr = *worker2Response
+			return nil
+		}).Times(3)
+
+	// since peer.ID("bob") will fail with protocols not supported his
+	// reputation will be affected and he will be punished
+	mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{
+		Value:  peerset.BadProtocolValue,
+		Reason: peerset.BadProtocolReason,
+	}, peer.ID("bob"))
+	// setup a chain sync which holds in its peer view map
+	// 3 peers, each one announce block 129 as its best block number.
+ // We start this test with genesis block being our best block, so + // we're far behind by 128 blocks, we should execute a bootstrap + // sync request those blocks + const blocksAhead = 257 + cs := setupChainSyncToBootstrapMode(t, blocksAhead, + mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, + mockStorageState, mockImportHandler, mockTelemetry) + + target, err := cs.getTarget() + require.NoError(t, err) + require.Equal(t, uint(blocksAhead), target) + + // include a new worker in the worker pool set, this worker + // should be an available peer that will receive a block request + // the worker pool executes the workers management + cs.workerPool.fromBlockAnnounce(peer.ID("alice")) + cs.workerPool.fromBlockAnnounce(peer.ID("bob")) + + stopCh := make(chan struct{}) + go cs.workerPool.listenForRequests(stopCh) + + err = cs.requestMaxBlocksFrom(mockedGenesisHeader) + require.NoError(t, err) + + close(stopCh) + <-cs.workerPool.doneCh + + // peer should be punished + syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] + require.True(t, ok) + require.Equal(t, punished, syncWorker.status) } -func TestChainSync_validateResponse(t *testing.T) { +func TestChainSync_BootstrapSync_SuccessfulSync_WithNilHeaderInResponse(t *testing.T) { t.Parallel() - badBlockHash := common.NewHash([]byte("badblockhash")) - - tests := map[string]struct { - blockStateBuilder func(ctrl *gomock.Controller) BlockState - networkBuilder func(ctrl *gomock.Controller) Network - req *network.BlockRequestMessage - resp *network.BlockResponseMessage - expectedError error - }{ - "nil_req,_nil_resp": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - return mockBlockState - }, - networkBuilder: func(ctrl *gomock.Controller) Network { - return NewMockNetwork(ctrl) - }, - expectedError: errEmptyBlockData, - }, - 
"handle_error_response_is_not_chain,_has_header": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) - return mockBlockState - }, - networkBuilder: func(ctrl *gomock.Controller) Network { - return NewMockNetwork(ctrl) - }, - req: &network.BlockRequestMessage{ - RequestedData: network.RequestedDataHeader, - }, - resp: &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Header: &types.Header{ - Number: 1, - }, - Body: &types.Body{}, - }, - { - Header: &types.Header{ - Number: 2, - }, - Body: &types.Body{}, - }, - }, - }, - expectedError: errResponseIsNotChain, - }, - "handle_justification-only_request,_unknown_block": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - return mockBlockState - }, - networkBuilder: func(ctrl *gomock.Controller) Network { - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ - Value: peerset.BadJustificationValue, - Reason: peerset.BadJustificationReason, - }, peer.ID("")) - return mockNetwork - }, - req: &network.BlockRequestMessage{ - RequestedData: network.RequestedDataJustification, - }, - resp: &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Justification: &[]byte{0}, - }, - }, - }, - expectedError: errUnknownBlockForJustification, - }, - "handle_error_unknown_parent": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - 
mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - return mockBlockState - }, - networkBuilder: func(ctrl *gomock.Controller) Network { - return NewMockNetwork(ctrl) - }, - req: &network.BlockRequestMessage{ - RequestedData: network.RequestedDataHeader, - }, - resp: &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Header: &types.Header{ - Number: 1, - }, - Body: &types.Body{}, - }, - { - Header: &types.Header{ - Number: 2, - }, - Body: &types.Body{}, - }, - }, - }, - expectedError: errUnknownParent, - }, - "handle_error_bad_block": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - return mockBlockState - }, - networkBuilder: func(ctrl *gomock.Controller) Network { - return NewMockNetwork(ctrl) - }, - req: &network.BlockRequestMessage{ - RequestedData: network.RequestedDataHeader, - }, - resp: &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: badBlockHash, - Header: &types.Header{ - Number: 2, - }, - Body: &types.Body{}, - }, - }, - }, - expectedError: errBadBlock, - }, - "no_error": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil) - return mockBlockState - }, - networkBuilder: func(ctrl *gomock.Controller) Network { - return NewMockNetwork(ctrl) - }, - req: &network.BlockRequestMessage{ - RequestedData: network.RequestedDataHeader, - }, - resp: &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Header: &types.Header{ - Number: 2, - }, - Body: &types.Body{}, - }, - { - Header: &types.Header{ - ParentHash: (&types.Header{ - Number: 2, - }).Hash(), - Number: 3, - }, - Body: &types.Body{}, - }, - }, - 
}, - }, + + ctrl := gomock.NewController(t) + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, + trie.EmptyHash, 0, types.NewDigest()) + + mockNetwork := NewMockNetwork(ctrl) + mockRequestMaker := NewMockRequestMaker(ctrl) + + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockStorageState := NewMockStorageState(ctrl) + mockImportHandler := NewMockBlockImportHandler(ctrl) + mockTelemetry := NewMockTelemetry(ctrl) + + // this test expects two workers responding each request with 128 blocks which means + // we should import 256 blocks in total + blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256) + const announceBlock = false + + // here we split the whole set in two parts each one will be the "response" for each peer + worker1Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[:128], } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - cfg := chainSyncConfig{ - bs: tt.blockStateBuilder(ctrl), - pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), - readyBlocks: newBlockQueue(maxResponseSize), - net: tt.networkBuilder(ctrl), - badBlocks: []string{ - badBlockHash.String(), - }, - } - mockReqRes := NewMockRequestMaker(ctrl) + // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow + // will setup the expectations starting from the genesis header until block 128 + ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) - cs := newChainSync(cfg, mockReqRes) + worker2Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[128:], + } + // the worker 2 will respond from block 
129 to 256 so the ensureBlockImportFlow + // will setup the expectations starting from block 128, from previous worker, until block 256 + parent := worker1Response.BlockData[127] + ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) + + // we use gomock.Any since I cannot guarantee which peer picks which request + // but the first call to DoBlockRequest will return the first set and the second + // call will return the second set + doBlockRequestCount := 0 + mockRequestMaker.EXPECT(). + Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). + DoAndReturn(func(peerID, _, response any) any { + // lets ensure that the DoBlockRequest is called by + // peer.ID(alice) and peer.ID(bob). When bob calls, this method return an + // response item but without header as was requested + responsePtr := response.(*network.BlockResponseMessage) + defer func() { doBlockRequestCount++ }() + + pID := peerID.(peer.ID) // cast to peer ID + switch doBlockRequestCount { + case 0, 1: + if pID == peer.ID("alice") { + *responsePtr = *worker1Response + return nil + } - err := cs.validateResponse(tt.req, tt.resp, "") - if tt.expectedError != nil { - assert.EqualError(t, err, tt.expectedError.Error()) - } else { - assert.NoError(t, err) + if pID == peer.ID("bob") { + incompleteBlockData := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 128, 256) + incompleteBlockData.BlockData[0].Header = nil + + *responsePtr = *incompleteBlockData + return nil + } + + require.FailNow(t, "expected calls by %s and %s, got: %s", + peer.ID("alice"), peer.ID("bob"), pID) + default: + // ensure the the third call will be made by peer.ID("alice") + require.Equalf(t, pID, peer.ID("alice"), + "expect third call be made by %s, got: %s", peer.ID("alice"), pID) } - }) - } + + *responsePtr = *worker2Response + return nil + }).Times(3) + + // since peer.ID("bob") will fail 
with protocols not supported his + // reputation will be affected and + mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ + Value: peerset.IncompleteHeaderValue, + Reason: peerset.IncompleteHeaderReason, + }, peer.ID("bob")) + // setup a chain sync which holds in its peer view map + // 3 peers, each one announce block 129 as its best block number. + // We start this test with genesis block being our best block, so + // we're far behind by 128 blocks, we should execute a bootstrap + // sync request those blocks + const blocksAhead = 257 + cs := setupChainSyncToBootstrapMode(t, blocksAhead, + mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, + mockStorageState, mockImportHandler, mockTelemetry) + + target, err := cs.getTarget() + require.NoError(t, err) + require.Equal(t, uint(blocksAhead), target) + + // include a new worker in the worker pool set, this worker + // should be an available peer that will receive a block request + // the worker pool executes the workers management + cs.workerPool.fromBlockAnnounce(peer.ID("alice")) + cs.workerPool.fromBlockAnnounce(peer.ID("bob")) + + stopCh := make(chan struct{}) + go cs.workerPool.listenForRequests(stopCh) + + err = cs.requestMaxBlocksFrom(mockedGenesisHeader) + require.NoError(t, err) + + close(stopCh) + <-cs.workerPool.doneCh + + // peer should be punished + syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] + require.True(t, ok) + require.Equal(t, punished, syncWorker.status) } -func TestChainSync_doSync(t *testing.T) { +func TestChainSync_BootstrapSync_SuccessfulSync_WithResponseIsNotAChain(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) - readyBlocks := newBlockQueue(maxResponseSize) - cs := newTestChainSyncWithReadyBlocks(ctrl, readyBlocks) - - max := uint32(1) - req := &network.BlockRequestMessage{ - RequestedData: bootstrapRequestData, - StartingBlock: *variadic.MustNewUint32OrHash(1), - Direction: network.Ascending, - Max: &max, - } - mockBlockState := 
NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(true, nil).Times(2) - cs.blockState = mockBlockState + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, + trie.EmptyHash, 0, types.NewDigest()) - workerErr := cs.doSync(req, make(map[peer.ID]struct{})) - require.NotNil(t, workerErr) - require.Equal(t, errNoPeers, workerErr.err) + mockNetwork := NewMockNetwork(ctrl) + mockRequestMaker := NewMockRequestMaker(ctrl) - cs.peerState["noot"] = &peerState{ - number: 100, - } + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockStorageState := NewMockStorageState(ctrl) + mockImportHandler := NewMockBlockImportHandler(ctrl) + mockTelemetry := NewMockTelemetry(ctrl) - mockNetwork := NewMockNetwork(ctrl) - startingBlock := variadic.MustNewUint32OrHash(1) - max1 := uint32(1) + // this test expects two workers responding each request with 128 blocks which means + // we should import 256 blocks in total + blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256) + const announceBlock = false - mockReqRes := NewMockRequestMaker(ctrl) - mockReqRes.EXPECT().Do(peer.ID("noot"), &network.BlockRequestMessage{ - RequestedData: 19, - StartingBlock: *startingBlock, - Direction: 0, - Max: &max1, - }, &network.BlockResponseMessage{}) - cs.blockReqRes = mockReqRes - - cs.network = mockNetwork - - workerErr = cs.doSync(req, make(map[peer.ID]struct{})) - require.NotNil(t, workerErr) - require.Equal(t, errEmptyBlockData, workerErr.err) - - expectedResp := &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: common.Hash{0x1}, - Header: &types.Header{ - Number: 1, - }, - Body: &types.Body{}, - }, - }, + // here we split the whole set in two parts each one will be the "response" for each peer + worker1Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[:128], 
} - mockReqRes.EXPECT().Do(peer.ID("noot"), &network.BlockRequestMessage{ - RequestedData: 19, - StartingBlock: *startingBlock, - Direction: 0, - Max: &max1, - }, &network.BlockResponseMessage{}).Do( - func(_ peer.ID, _ *network.BlockRequestMessage, resp *network.BlockResponseMessage) { - *resp = *expectedResp - }, - ) + // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow + // will setup the expectations starting from the genesis header until block 128 + ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) - workerErr = cs.doSync(req, make(map[peer.ID]struct{})) - require.Nil(t, workerErr) - bd, err := readyBlocks.pop(context.Background()) - require.NotNil(t, bd) - require.NoError(t, err) - require.Equal(t, expectedResp.BlockData[0], bd) - - parent := (&types.Header{ - Number: 2, - }).Hash() - expectedResp = &network.BlockResponseMessage{ - BlockData: []*types.BlockData{ - { - Hash: common.Hash{0x3}, - Header: &types.Header{ - ParentHash: parent, - Number: 3, - }, - Body: &types.Body{}, - }, - { - Hash: common.Hash{0x2}, - Header: &types.Header{ - Number: 2, - }, - Body: &types.Body{}, - }, - }, + worker2Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[128:], } + // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow + // will setup the expectations starting from block 128, from previous worker, until block 256 + parent := worker1Response.BlockData[127] + ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) + + // we use gomock.Any since I cannot guarantee which peer picks which request + // but the first call to DoBlockRequest will return the first set and the second + // call will return the second set + 
doBlockRequestCount := 0 + mockRequestMaker.EXPECT(). + Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). + DoAndReturn(func(peerID, _, response any) any { + // lets ensure that the DoBlockRequest is called by + // peer.ID(alice) and peer.ID(bob). When bob calls, this method return an + // response that does not form an chain + responsePtr := response.(*network.BlockResponseMessage) + defer func() { doBlockRequestCount++ }() + + pID := peerID.(peer.ID) // cast to peer ID + switch doBlockRequestCount { + case 0, 1: + if pID == peer.ID("alice") { + *responsePtr = *worker1Response + return nil + } - // test to see if descending blocks get reversed - req.Direction = network.Descending + if pID == peer.ID("bob") { + notAChainBlockData := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 128, 256) + // swap positions to force the problem + firstItem := notAChainBlockData.BlockData[0] + notAChainBlockData.BlockData[0] = notAChainBlockData.BlockData[130] + notAChainBlockData.BlockData[130] = firstItem - mockReqRes.EXPECT().Do(peer.ID("noot"), &network.BlockRequestMessage{ - RequestedData: 19, - StartingBlock: *startingBlock, - Direction: 1, - Max: &max1, - }, &network.BlockResponseMessage{}).Do( - func(_ peer.ID, _ *network.BlockRequestMessage, resp *network.BlockResponseMessage) { - *resp = *expectedResp - }, - ) + *responsePtr = *notAChainBlockData + return nil + } - cs.network = mockNetwork - workerErr = cs.doSync(req, make(map[peer.ID]struct{})) - require.Nil(t, workerErr) + require.FailNow(t, "expected calls by %s and %s, got: %s", + peer.ID("alice"), peer.ID("bob"), pID) + default: + // ensure the the third call will be made by peer.ID("alice") + require.Equalf(t, pID, peer.ID("alice"), + "expect third call be made by %s, got: %s", peer.ID("alice"), pID) + } - bd, err = readyBlocks.pop(context.Background()) - require.NotNil(t, bd) - require.Equal(t, expectedResp.BlockData[0], bd) + *responsePtr = *worker2Response + return nil + }).Times(3) + 
+ // setup a chain sync which holds in its peer view map + // 3 peers, each one announce block 129 as its best block number. + // We start this test with genesis block being our best block, so + // we're far behind by 128 blocks, we should execute a bootstrap + // sync request those blocks + const blocksAhead = 257 + cs := setupChainSyncToBootstrapMode(t, blocksAhead, + mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, + mockStorageState, mockImportHandler, mockTelemetry) + + target, err := cs.getTarget() require.NoError(t, err) + require.Equal(t, uint(blocksAhead), target) - bd, err = readyBlocks.pop(context.Background()) - require.NotNil(t, bd) - require.Equal(t, expectedResp.BlockData[1], bd) + // include a new worker in the worker pool set, this worker + // should be an available peer that will receive a block request + // the worker pool executes the workers management + cs.workerPool.fromBlockAnnounce(peer.ID("alice")) + cs.workerPool.fromBlockAnnounce(peer.ID("bob")) + + stopCh := make(chan struct{}) + go cs.workerPool.listenForRequests(stopCh) + + err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) + + close(stopCh) + <-cs.workerPool.doneCh + + // peer should be punished + syncWorker, ok := cs.workerPool.workers[peer.ID("bob")] + require.True(t, ok) + require.Equal(t, punished, syncWorker.status) } -func TestHandleReadyBlock(t *testing.T) { +func TestChainSync_BootstrapSync_SuccessfulSync_WithReceivedBadBlock(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) - readyBlocks := newBlockQueue(maxResponseSize) - cs := newTestChainSyncWithReadyBlocks(ctrl, readyBlocks) + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, + trie.EmptyHash, 0, types.NewDigest()) - // test that descendant chain gets returned by getReadyDescendants on block 1 being 
ready - header1 := &types.Header{ - Number: 1, - } - block1 := &types.Block{ - Header: *header1, - Body: types.Body{}, - } + mockNetwork := NewMockNetwork(ctrl) + mockRequestMaker := NewMockRequestMaker(ctrl) - header2 := &types.Header{ - ParentHash: header1.Hash(), - Number: 2, - } - block2 := &types.Block{ - Header: *header2, - Body: types.Body{}, - } - cs.pendingBlocks.addBlock(block2) + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockStorageState := NewMockStorageState(ctrl) + mockImportHandler := NewMockBlockImportHandler(ctrl) + mockTelemetry := NewMockTelemetry(ctrl) - header3 := &types.Header{ - ParentHash: header2.Hash(), - Number: 3, - } - block3 := &types.Block{ - Header: *header3, - Body: types.Body{}, - } - cs.pendingBlocks.addBlock(block3) + // this test expects two workers responding each request with 128 blocks which means + // we should import 256 blocks in total + blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 256) + const announceBlock = false - header2NotDescendant := &types.Header{ - ParentHash: common.Hash{0xff}, - Number: 2, + // here we split the whole set in two parts each one will be the "response" for each peer + worker1Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[:128], } - block2NotDescendant := &types.Block{ - Header: *header2NotDescendant, - Body: types.Body{}, + + // the first peer will respond the from the block 1 to 128 so the ensureBlockImportFlow + // will setup the expectations starting from the genesis header until block 128 + ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) + + worker2Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[128:], } - cs.pendingBlocks.addBlock(block2NotDescendant) + // the worker 2 will respond from block 129 to 256 so the ensureBlockImportFlow + // will setup the 
expectations starting from block 128, from previous worker, until block 256 + parent := worker1Response.BlockData[127] + ensureSuccessfulBlockImportFlow(t, parent.Header, worker2Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) + + fakeBadBlockHash := common.MustHexToHash("0x18767cb4bb4cc13bf119f6613aec5487d4c06a2e453de53d34aea6f3f1ee9855") + + // we use gomock.Any since I cannot guarantee which peer picks which request + // but the first call to DoBlockRequest will return the first set and the second + // call will return the second set + doBlockRequestCount := 0 + mockRequestMaker.EXPECT(). + Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). + DoAndReturn(func(peerID, _, response any) any { + // lets ensure that the DoBlockRequest is called by + // peer.ID(alice) and peer.ID(bob). When bob calls, this method return an + // response that contains a know bad block + responsePtr := response.(*network.BlockResponseMessage) + defer func() { doBlockRequestCount++ }() + + pID := peerID.(peer.ID) // cast to peer ID + switch doBlockRequestCount { + case 0, 1: + if pID == peer.ID("alice") { + *responsePtr = *worker1Response + return nil + } - cs.handleReadyBlock(block1.ToBlockData()) + if pID == peer.ID("bob") { + blockDataWithBadBlock := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 129, 256) + blockDataWithBadBlock.BlockData[4].Hash = fakeBadBlockHash + *responsePtr = *blockDataWithBadBlock + return nil + } - require.False(t, cs.pendingBlocks.(*disjointBlockSet).hasBlock(header1.Hash())) - require.False(t, cs.pendingBlocks.(*disjointBlockSet).hasBlock(header2.Hash())) - require.False(t, cs.pendingBlocks.(*disjointBlockSet).hasBlock(header3.Hash())) - require.True(t, cs.pendingBlocks.(*disjointBlockSet).hasBlock(header2NotDescendant.Hash())) + require.FailNow(t, "expected calls by %s and %s, got: %s", + peer.ID("alice"), peer.ID("bob"), pID) + default: + // ensure the 
the third call will be made by peer.ID("alice") + require.Equalf(t, pID, peer.ID("alice"), + "expect third call be made by %s, got: %s", peer.ID("alice"), pID) + } - blockData1, err := readyBlocks.pop(context.Background()) + *responsePtr = *worker2Response + return nil + }).Times(3) + + mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{ + Value: peerset.BadBlockAnnouncementValue, + Reason: peerset.BadBlockAnnouncementReason, + }, peer.ID("bob")) + // setup a chain sync which holds in its peer view map + // 3 peers, each one announce block 129 as its best block number. + // We start this test with genesis block being our best block, so + // we're far behind by 128 blocks, we should execute a bootstrap + // sync request those blocks + const blocksAhead = 257 + cs := setupChainSyncToBootstrapMode(t, blocksAhead, + mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, + mockStorageState, mockImportHandler, mockTelemetry) + + cs.badBlocks = []string{fakeBadBlockHash.String()} + + target, err := cs.getTarget() require.NoError(t, err) - require.Equal(t, block1.ToBlockData(), blockData1) + require.Equal(t, uint(blocksAhead), target) - blockData2, err := readyBlocks.pop(context.Background()) - require.NoError(t, err) - require.Equal(t, block2.ToBlockData(), blockData2) + // include a new worker in the worker pool set, this worker + // should be an available peer that will receive a block request + // the worker pool executes the workers management + cs.workerPool.fromBlockAnnounce(peer.ID("alice")) + cs.workerPool.fromBlockAnnounce(peer.ID("bob")) + + stopCh := make(chan struct{}) + go cs.workerPool.listenForRequests(stopCh) - blockData3, err := readyBlocks.pop(context.Background()) + err = cs.requestMaxBlocksFrom(mockedGenesisHeader) require.NoError(t, err) - require.Equal(t, block3.ToBlockData(), blockData3) + + close(stopCh) + <-cs.workerPool.doneCh + + // peer should be not in the worker pool + // peer should be in the ignore list + _, ok := 
cs.workerPool.workers[peer.ID("bob")] + require.False(t, ok) + + _, ok = cs.workerPool.ignorePeers[peer.ID("bob")] + require.True(t, ok) } -func TestChainSync_determineSyncPeers(t *testing.T) { +func TestChainSync_BootstrapSync_SucessfulSync_ReceivedPartialBlockData(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) - cs := newTestChainSync(ctrl) + mockBlockState := NewMockBlockState(ctrl) + mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) + mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash, + trie.EmptyHash, 0, types.NewDigest()) + + mockNetwork := NewMockNetwork(ctrl) + mockRequestMaker := NewMockRequestMaker(ctrl) - req := &network.BlockRequestMessage{} - testPeerA := peer.ID("a") - testPeerB := peer.ID("b") - peersTried := make(map[peer.ID]struct{}) + mockBabeVerifier := NewMockBabeVerifier(ctrl) + mockStorageState := NewMockStorageState(ctrl) + mockImportHandler := NewMockBlockImportHandler(ctrl) + mockTelemetry := NewMockTelemetry(ctrl) - // test base case - cs.peerState[testPeerA] = &peerState{ - number: 129, + // create a set of 128 blocks + blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 128) + const announceBlock = false + + // the worker will return a partial size of the set + worker1Response := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[:97], } - cs.peerState[testPeerB] = &peerState{ - number: 257, + + // the first peer will respond the from the block 1 to 96 so the ensureBlockImportFlow + // will setup the expectations starting from the genesis header until block 96 + ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) + + worker1MissingBlocksResponse := &network.BlockResponseMessage{ + BlockData: blockResponse.BlockData[97:], } - peers := cs.determineSyncPeers(req, 
peersTried) - require.Equal(t, 2, len(peers)) - require.Contains(t, peers, testPeerA) - require.Contains(t, peers, testPeerB) - - // test peer ignored case - cs.ignorePeers[testPeerA] = struct{}{} - peers = cs.determineSyncPeers(req, peersTried) - require.Equal(t, 1, len(peers)) - require.Equal(t, []peer.ID{testPeerB}, peers) - - // test all peers ignored case - cs.ignorePeers[testPeerB] = struct{}{} - peers = cs.determineSyncPeers(req, peersTried) - require.Equal(t, 2, len(peers)) - require.Contains(t, peers, testPeerA) - require.Contains(t, peers, testPeerB) - require.Equal(t, 0, len(cs.ignorePeers)) - - // test peer's best block below number case, shouldn't include that peer - start, err := variadic.NewUint32OrHash(130) + // last item from the previous response + parent := worker1Response.BlockData[96] + ensureSuccessfulBlockImportFlow(t, parent.Header, worker1MissingBlocksResponse.BlockData, mockBlockState, + mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, announceBlock) + + doBlockRequestCount := 0 + mockRequestMaker.EXPECT(). + Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}). + DoAndReturn(func(peerID, _, response any) any { + // lets ensure that the DoBlockRequest is called by + // peer.ID(alice). 
The first call will return only 97 blocks + // the handler should issue another call to retrieve the missing blocks + pID := peerID.(peer.ID) // cast to peer ID + require.Equalf(t, pID, peer.ID("alice"), + "expect third call be made by %s, got: %s", peer.ID("alice"), pID) + + responsePtr := response.(*network.BlockResponseMessage) + defer func() { doBlockRequestCount++ }() + + if doBlockRequestCount == 0 { + *responsePtr = *worker1Response + return nil + } + + *responsePtr = *worker1MissingBlocksResponse + return nil + }).Times(2) + + const blocksAhead = 256 + cs := setupChainSyncToBootstrapMode(t, blocksAhead, + mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier, + mockStorageState, mockImportHandler, mockTelemetry) + + target, err := cs.getTarget() require.NoError(t, err) - req.StartingBlock = *start - peers = cs.determineSyncPeers(req, peersTried) - require.Equal(t, 1, len(peers)) - require.Equal(t, []peer.ID{testPeerB}, peers) - - // test peer tried case, should ignore peer already tried - peersTried[testPeerA] = struct{}{} - req.StartingBlock = variadic.Uint32OrHash{} - peers = cs.determineSyncPeers(req, peersTried) - require.Equal(t, 1, len(peers)) - require.Equal(t, []peer.ID{testPeerB}, peers) -} + require.Equal(t, uint(blocksAhead), target) -func Test_chainSync_logSyncSpeed(t *testing.T) { - t.Parallel() + cs.workerPool.fromBlockAnnounce(peer.ID("alice")) - type fields struct { - blockStateBuilder func(ctrl *gomock.Controller) BlockState - networkBuilder func(ctrl *gomock.Controller) Network - state chainSyncState - benchmarker *syncBenchmarker - } - tests := []struct { - name string - fields fields - }{ - { - name: "state_bootstrap", - fields: fields{ - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil).Times(3) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{}, nil) - return mockBlockState 
- }, - networkBuilder: func(ctrl *gomock.Controller) Network { - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().Peers().Return(nil) - return mockNetwork - }, - benchmarker: newSyncBenchmarker(10), - state: bootstrap, - }, - }, - { - name: "case_tip", - fields: fields{ - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil).Times(3) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{}, nil) - return mockBlockState - }, - networkBuilder: func(ctrl *gomock.Controller) Network { - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().Peers().Return(nil) - return mockNetwork - }, - benchmarker: newSyncBenchmarker(10), - state: tip, - }, - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - ctx, cancel := context.WithCancel(context.Background()) - tickerChannel := make(chan time.Time) - cs := &chainSync{ - ctx: ctx, - cancel: cancel, - blockState: tt.fields.blockStateBuilder(ctrl), - network: tt.fields.networkBuilder(ctrl), - state: tt.fields.state, - benchmarker: tt.fields.benchmarker, - logSyncTickerC: tickerChannel, - logSyncTicker: time.NewTicker(time.Hour), // just here to be stopped - logSyncDone: make(chan struct{}), - } + stopCh := make(chan struct{}) + go cs.workerPool.listenForRequests(stopCh) - go cs.logSyncSpeed() + err = cs.requestMaxBlocksFrom(mockedGenesisHeader) + require.NoError(t, err) - tickerChannel <- time.Time{} - cs.cancel() - <-cs.logSyncDone - }) - } -} + close(stopCh) + <-cs.workerPool.doneCh -func Test_chainSync_start(t *testing.T) { - t.Parallel() + require.Len(t, cs.workerPool.workers, 1) - type fields struct { - blockStateBuilder func(ctrl *gomock.Controller) BlockState - disjointBlockSetBuilder func(ctrl *gomock.Controller, called chan<- struct{}) DisjointBlockSet - benchmarker *syncBenchmarker 
+ _, ok := cs.workerPool.workers[peer.ID("alice")] + require.True(t, ok) +} + +func createSuccesfullBlockResponse(_ *testing.T, genesisHash common.Hash, + startingAt, numBlocks int) *network.BlockResponseMessage { + response := new(network.BlockResponseMessage) + response.BlockData = make([]*types.BlockData, numBlocks) + + emptyTrieState := storage.NewTrieState(nil) + tsRoot := emptyTrieState.MustRoot() + + firstHeader := types.NewHeader(genesisHash, tsRoot, common.Hash{}, + uint(startingAt), scale.VaryingDataTypeSlice{}) + response.BlockData[0] = &types.BlockData{ + Hash: firstHeader.Hash(), + Header: firstHeader, + Body: types.NewBody([]types.Extrinsic{}), + Justification: nil, } - tests := []struct { - name string - fields fields - }{ - { - name: "base_case", - fields: fields{ - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().BestBlockHeader().Return(&types.Header{}, nil) - return mockBlockState - }, - disjointBlockSetBuilder: func(ctrl *gomock.Controller, called chan<- struct{}) DisjointBlockSet { - mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) - mockDisjointBlockSet.EXPECT().run(gomock.AssignableToTypeOf(make(<-chan struct{}))). - DoAndReturn(func(stop <-chan struct{}) { - close(called) // test glue, ideally we would use a ready chan struct passed to run(). 
- }) - return mockDisjointBlockSet - }, - benchmarker: newSyncBenchmarker(1), - }, - }, + + parentHash := firstHeader.Hash() + for idx := 1; idx < numBlocks; idx++ { + blockNumber := idx + startingAt + header := types.NewHeader(parentHash, tsRoot, common.Hash{}, + uint(blockNumber), scale.VaryingDataTypeSlice{}) + response.BlockData[idx] = &types.BlockData{ + Hash: header.Hash(), + Header: header, + Body: types.NewBody([]types.Extrinsic{}), + Justification: nil, + } + parentHash = header.Hash() } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - ctx, cancel := context.WithCancel(context.Background()) - disjointBlockSetCalled := make(chan struct{}) - cs := &chainSync{ - ctx: ctx, - cancel: cancel, - blockState: tt.fields.blockStateBuilder(ctrl), - pendingBlocks: tt.fields.disjointBlockSetBuilder(ctrl, disjointBlockSetCalled), - benchmarker: tt.fields.benchmarker, - slotDuration: time.Hour, - logSyncTicker: time.NewTicker(time.Hour), // just here to be closed - logSyncDone: make(chan struct{}), - } - cs.start() - <-disjointBlockSetCalled - cs.stop() - }) + + return response +} + +// ensureSuccessfulBlockImportFlow will setup the expectations for method calls +// that happens while chain sync imports a block +func ensureSuccessfulBlockImportFlow(t *testing.T, parentHeader *types.Header, + blocksReceived []*types.BlockData, mockBlockState *MockBlockState, + mockBabeVerifier *MockBabeVerifier, mockStorageState *MockStorageState, + mockImportHandler *MockBlockImportHandler, mockTelemetry *MockTelemetry, announceBlock bool) { + t.Helper() + + for idx, blockData := range blocksReceived { + mockBlockState.EXPECT().HasHeader(blockData.Header.Hash()).Return(false, nil) + mockBlockState.EXPECT().HasBlockBody(blockData.Header.Hash()).Return(false, nil) + mockBabeVerifier.EXPECT().VerifyBlock(blockData.Header).Return(nil) + + var previousHeader *types.Header + if idx == 0 { + previousHeader = 
parentHeader + } else { + previousHeader = blocksReceived[idx-1].Header + } + + mockBlockState.EXPECT().GetHeader(blockData.Header.ParentHash).Return(previousHeader, nil) + mockStorageState.EXPECT().Lock() + mockStorageState.EXPECT().Unlock() + + emptyTrieState := storage.NewTrieState(nil) + parentStateRoot := previousHeader.StateRoot + mockStorageState.EXPECT().TrieState(&parentStateRoot). + Return(emptyTrieState, nil) + + ctrl := gomock.NewController(t) + mockRuntimeInstance := NewMockInstance(ctrl) + mockBlockState.EXPECT().GetRuntime(previousHeader.Hash()). + Return(mockRuntimeInstance, nil) + + expectedBlock := &types.Block{ + Header: *blockData.Header, + Body: *blockData.Body, + } + + mockRuntimeInstance.EXPECT().SetContextStorage(emptyTrieState) + mockRuntimeInstance.EXPECT().ExecuteBlock(expectedBlock). + Return(nil, nil) + + mockImportHandler.EXPECT().HandleBlockImport(expectedBlock, emptyTrieState, announceBlock). + Return(nil) + + blockHash := blockData.Header.Hash() + expectedTelemetryMessage := telemetry.NewBlockImport( + &blockHash, + blockData.Header.Number, + "NetworkInitialSync") + mockTelemetry.EXPECT().SendMessage(expectedTelemetryMessage) + + mockBlockState.EXPECT().CompareAndSetBlockData(blockData).Return(nil) } } -func Test_chainSync_setBlockAnnounce(t *testing.T) { +func TestChainSync_validateResponseFields(t *testing.T) { t.Parallel() - type args struct { - from peer.ID - header *types.Header + block1Header := &types.Header{ + ParentHash: common.MustHexToHash("0x00597cb4bb4cc13bf119f6613aec7642d4c06a2e453de53d34aea6f3f1eeb504"), + Number: 2, + } + + block2Header := &types.Header{ + ParentHash: block1Header.Hash(), + Number: 3, } - tests := map[string]struct { - chainSyncBuilder func(*types.Header, *gomock.Controller) chainSync - args args - wantErr error + + cases := map[string]struct { + wantErr error + errString string + setupChainSync func(t *testing.T) *chainSync + requestedData byte + blockData *types.BlockData }{ - "base_case": { - 
wantErr: blocktree.ErrBlockExists, - args: args{ - header: &types.Header{Number: 2}, - }, - chainSyncBuilder: func(_ *types.Header, ctrl *gomock.Controller) chainSync { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().HasHeader(common.MustHexToHash( - "0x05bdcc454f60a08d427d05e7f19f240fdc391f570ab76fcb96ecca0b5823d3bf")).Return(true, nil) - mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) - return chainSync{ - blockState: mockBlockState, - pendingBlocks: mockDisjointBlockSet, + "requested_bootstrap_data_but_got_nil_header": { + wantErr: errNilHeaderInResponse, + errString: "expected header, received none: " + + block2Header.Hash().String(), + requestedData: network.BootstrapRequestData, + blockData: &types.BlockData{ + Hash: block2Header.Hash(), + Header: nil, + Body: &types.Body{}, + Justification: &[]byte{0}, + }, + setupChainSync: func(t *testing.T) *chainSync { + ctrl := gomock.NewController(t) + blockStateMock := NewMockBlockState(ctrl) + blockStateMock.EXPECT().HasHeader(block1Header.ParentHash).Return(true, nil) + + networkMock := NewMockNetwork(ctrl) + networkMock.EXPECT().ReportPeer(peerset.ReputationChange{ + Value: peerset.IncompleteHeaderValue, + Reason: peerset.IncompleteHeaderReason, + }, peer.ID("peer")) + + return &chainSync{ + blockState: blockStateMock, + network: networkMock, } }, }, - "err_when_calling_has_header": { - wantErr: errors.New("checking header exists"), - args: args{ - header: &types.Header{Number: 2}, - }, - chainSyncBuilder: func(_ *types.Header, ctrl *gomock.Controller) chainSync { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT(). - HasHeader(common.MustHexToHash( - "0x05bdcc454f60a08d427d05e7f19f240fdc391f570ab76fcb96ecca0b5823d3bf")). 
- Return(false, errors.New("checking header exists")) - mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) - return chainSync{ - blockState: mockBlockState, - pendingBlocks: mockDisjointBlockSet, + "requested_bootstrap_data_but_got_nil_body": { + wantErr: errNilBodyInResponse, + errString: "expected body, received none: " + + block2Header.Hash().String(), + requestedData: network.BootstrapRequestData, + blockData: &types.BlockData{ + Hash: block2Header.Hash(), + Header: block2Header, + Body: nil, + Justification: &[]byte{0}, + }, + setupChainSync: func(t *testing.T) *chainSync { + ctrl := gomock.NewController(t) + blockStateMock := NewMockBlockState(ctrl) + blockStateMock.EXPECT().HasHeader(block1Header.ParentHash).Return(true, nil) + networkMock := NewMockNetwork(ctrl) + + return &chainSync{ + blockState: blockStateMock, + network: networkMock, } }, }, - "adding_block_header_to_pending_blocks": { - args: args{ - header: &types.Header{Number: 2}, - }, - chainSyncBuilder: func(expectedHeader *types.Header, ctrl *gomock.Controller) chainSync { - argumentHeaderHash := common.MustHexToHash( - "0x05bdcc454f60a08d427d05e7f19f240fdc391f570ab76fcb96ecca0b5823d3bf") - - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT(). - HasHeader(argumentHeaderHash). - Return(false, nil) + "requested_only_justification_but_got_nil": { + wantErr: errNilJustificationInResponse, + errString: "expected justification, received none: " + + block2Header.Hash().String(), + requestedData: network.RequestedDataJustification, + blockData: &types.BlockData{ + Hash: block2Header.Hash(), + Header: block2Header, + Body: nil, + Justification: nil, + }, + setupChainSync: func(t *testing.T) *chainSync { + ctrl := gomock.NewController(t) + blockStateMock := NewMockBlockState(ctrl) + blockStateMock.EXPECT().HasHeader(block1Header.ParentHash).Return(true, nil) + networkMock := NewMockNetwork(ctrl) - mockBlockState.EXPECT(). - BestBlockHeader(). 
- Return(&types.Header{Number: 1}, nil) - - mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) - mockDisjointBlockSet.EXPECT(). - addHeader(expectedHeader). - Return(nil) - - mockDisjointBlockSet.EXPECT(). - addHashAndNumber(argumentHeaderHash, uint(2)). - Return(nil) - - return chainSync{ - blockState: mockBlockState, - pendingBlocks: mockDisjointBlockSet, - peerState: make(map[peer.ID]*peerState), - // creating an buffered channel for this specific test - // since it will put a work on the queue and an unbufered channel - // will hang until we read on this channel and the goal is to - // put the work on the channel and don't block - workQueue: make(chan *peerState, 1), + return &chainSync{ + blockState: blockStateMock, + network: networkMock, } }, }, } - for name, tt := range tests { + + for tname, tt := range cases { tt := tt - t.Run(name, func(t *testing.T) { + t.Run(tname, func(t *testing.T) { t.Parallel() - ctrl := gomock.NewController(t) - sync := tt.chainSyncBuilder(tt.args.header, ctrl) - err := sync.setBlockAnnounce(tt.args.from, tt.args.header) - if tt.wantErr != nil { - assert.EqualError(t, err, tt.wantErr.Error()) - } else { - assert.NoError(t, err) - } - if sync.workQueue != nil { - assert.Equal(t, len(sync.workQueue), 1) + err := validateResponseFields(tt.requestedData, []*types.BlockData{tt.blockData}) + require.ErrorIs(t, err, tt.wantErr) + if tt.errString != "" { + require.EqualError(t, err, tt.errString) } }) } } -func Test_chainSync_getHighestBlock(t *testing.T) { +func TestChainSync_isResponseAChain(t *testing.T) { t.Parallel() - tests := []struct { - name string - peerState map[peer.ID]*peerState - wantHighestBlock uint - expectedError error - }{ - { - name: "error no peers", - expectedError: errors.New("no peers to sync with"), - }, - { - name: "base case", - peerState: map[peer.ID]*peerState{"1": {number: 2}}, - wantHighestBlock: 2, - }, + block1Header := &types.Header{ + ParentHash: 
common.MustHexToHash("0x00597cb4bb4cc13bf119f6613aec7642d4c06a2e453de53d34aea6f3f1eeb504"), + Number: 2, } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - cs := &chainSync{ - peerState: tt.peerState, - } - gotHighestBlock, err := cs.getHighestBlock() - if tt.expectedError != nil { - assert.EqualError(t, err, tt.expectedError.Error()) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.wantHighestBlock, gotHighestBlock) - }) + + block2Header := &types.Header{ + ParentHash: block1Header.Hash(), + Number: 3, } -} -func Test_chainSync_handleResult(t *testing.T) { - t.Parallel() - mockError := errors.New("test mock error") - tests := map[string]struct { - chainSyncBuilder func(ctrl *gomock.Controller, result *worker) chainSync - maxWorkerRetries uint16 - res *worker - err error + block4Header := &types.Header{ + ParentHash: common.MustHexToHash("0x198616547187613bf119f6613aec7642d4c06a2e453de53d34aea6f390788677"), + Number: 4, + } + + cases := map[string]struct { + expected bool + blockData []*types.BlockData }{ - "res.err_==_nil": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - return chainSync{ - workerState: newWorkerState(), - } - }, - res: &worker{}, - }, - "res.err.err.Error()_==_context.Canceled": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - return chainSync{ - workerState: newWorkerState(), - } - }, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: context.Canceled, - }, - }, - }, - "res.err.err.Error()_==_context.DeadlineExceeded": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{Value: -1024, Reason: "Request timeout"}, - peer.ID("")) - mockWorkHandler := NewMockworkHandler(ctrl) - mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) - return chainSync{ - workerState: 
newWorkerState(), - network: mockNetwork, - handler: mockWorkHandler, - } - }, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: context.DeadlineExceeded, - }, - }, - }, - "res.err.err.Error()_dial_backoff": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - return chainSync{ - workerState: newWorkerState(), - } - }, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: errors.New("dial backoff"), - }, - }, - }, - "res.err.err.Error()_==_errNoPeers": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - return chainSync{ - workerState: newWorkerState(), - } - }, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: errNoPeers, - }, - }, - }, - "res.err.err.Error()_==_protocol_not_supported": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - mockNetwork := NewMockNetwork(ctrl) - mockNetwork.EXPECT().ReportPeer(peerset.ReputationChange{Value: -2147483648, - Reason: "Unsupported protocol"}, - peer.ID("")) - return chainSync{ - workerState: newWorkerState(), - network: mockNetwork, - } - }, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: errors.New("protocol not supported"), - }, - }, - }, - "no_error,_no_retries": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - mockWorkHandler := NewMockworkHandler(ctrl) - mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) - return chainSync{ - workerState: newWorkerState(), - handler: mockWorkHandler, - } - }, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: errors.New(""), + "not_a_chain": { + expected: false, + blockData: []*types.BlockData{ + { + Hash: block1Header.Hash(), + Header: block1Header, + Body: &types.Body{}, + Justification: &[]byte{0}, }, - }, - }, - "handle_work_result_error,_no_retries": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { 
- mockWorkHandler := NewMockworkHandler(ctrl) - mockWorkHandler.EXPECT().handleWorkerResult(result).Return(nil, mockError) - return chainSync{ - workerState: newWorkerState(), - handler: mockWorkHandler, - } - }, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: errors.New(""), + { + Hash: block2Header.Hash(), + Header: block2Header, + Body: &types.Body{}, + Justification: &[]byte{0}, }, - }, - err: mockError, - }, - "handle_work_result_nil,_no_retries": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - mockWorkHandler := NewMockworkHandler(ctrl) - mockWorkHandler.EXPECT().handleWorkerResult(result).Return(nil, nil) - return chainSync{ - workerState: newWorkerState(), - handler: mockWorkHandler, - } - }, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: errors.New(""), + { + Hash: block4Header.Hash(), + Header: block4Header, + Body: &types.Body{}, + Justification: &[]byte{0}, }, }, }, - "no_error,_maxWorkerRetries_2": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - mockWorkHandler := NewMockworkHandler(ctrl) - mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) - mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) - mockDisjointBlockSet.EXPECT().removeBlock(common.Hash{}) - return chainSync{ - workerState: newWorkerState(), - handler: mockWorkHandler, - pendingBlocks: mockDisjointBlockSet, - } - }, - maxWorkerRetries: 2, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: errors.New(""), + "is_a_chain": { + expected: true, + blockData: []*types.BlockData{ + { + Hash: block1Header.Hash(), + Header: block1Header, + Body: &types.Body{}, + Justification: &[]byte{0}, }, - pendingBlock: newPendingBlock(common.Hash{}, 1, nil, nil, time.Now()), - }, - }, - "no_error": { - chainSyncBuilder: func(ctrl *gomock.Controller, result *worker) chainSync { - mockWorkHandler := NewMockworkHandler(ctrl) - 
mockWorkHandler.EXPECT().handleWorkerResult(result).Return(result, nil) - mockWorkHandler.EXPECT().hasCurrentWorker(&worker{ - ctx: context.Background(), - err: &workerError{ - err: mockError, - }, - retryCount: 1, - peersTried: map[peer.ID]struct{}{ - "": {}, - }, - }, newWorkerState().workers).Return(true) - return chainSync{ - workerState: newWorkerState(), - handler: mockWorkHandler, - maxWorkerRetries: 2, - } - }, - res: &worker{ - ctx: context.Background(), - err: &workerError{ - err: mockError, + { + Hash: block2Header.Hash(), + Header: block2Header, + Body: &types.Body{}, + Justification: &[]byte{0}, }, }, }, } - for testName, tt := range tests { + + for tname, tt := range cases { tt := tt - t.Run(testName, func(t *testing.T) { + t.Run(tname, func(t *testing.T) { t.Parallel() - ctrl := gomock.NewController(t) - sync := tt.chainSyncBuilder(ctrl, tt.res) - err := sync.handleResult(tt.res) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } + output := isResponseAChain(tt.blockData) + require.Equal(t, tt.expected, output) }) } } -func newTestChainSyncWithReadyBlocks(ctrl *gomock.Controller, readyBlocks *blockQueue) *chainSync { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo)) +func TestChainSync_getHighestBlock(t *testing.T) { + t.Parallel() - cfg := chainSyncConfig{ - bs: mockBlockState, - readyBlocks: readyBlocks, - pendingBlocks: newDisjointBlockSet(pendingBlocksLimit), - minPeers: 1, - maxPeers: 5, - slotDuration: defaultSlotDuration, + cases := map[string]struct { + expectedHighestBlock uint + wantErr error + chainSyncPeerView map[peer.ID]*peerView + }{ + "no_peer_view": { + wantErr: errNoPeers, + expectedHighestBlock: 0, + chainSyncPeerView: make(map[peer.ID]*peerView), + }, + "highest_block": { + expectedHighestBlock: 500, + chainSyncPeerView: map[peer.ID]*peerView{ + peer.ID("peer-A"): { + number: 100, + }, 
+ peer.ID("peer-B"): { + number: 500, + }, + }, + }, } - mockReqRes := NewMockRequestMaker(ctrl) - return newChainSync(cfg, mockReqRes) -} + for tname, tt := range cases { + tt := tt + t.Run(tname, func(t *testing.T) { + t.Parallel() -func newTestChainSync(ctrl *gomock.Controller) *chainSync { - readyBlocks := newBlockQueue(maxResponseSize) - return newTestChainSyncWithReadyBlocks(ctrl, readyBlocks) + chainSync := &chainSync{ + peerView: tt.chainSyncPeerView, + } + + highestBlock, err := chainSync.getHighestBlock() + require.ErrorIs(t, err, tt.wantErr) + require.Equal(t, tt.expectedHighestBlock, highestBlock) + }) + } } diff --git a/dot/sync/disjoint_block_set.go b/dot/sync/disjoint_block_set.go index 69c5462c16..400b3877e9 100644 --- a/dot/sync/disjoint_block_set.go +++ b/dot/sync/disjoint_block_set.go @@ -10,6 +10,7 @@ import ( "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" + "golang.org/x/exp/maps" ) const ( @@ -26,7 +27,7 @@ var ( // DisjointBlockSet represents a set of incomplete blocks, or blocks // with an unknown parent. 
it is implemented by *disjointBlockSet type DisjointBlockSet interface { - run(done <-chan struct{}) + run(finalisedCh <-chan *types.FinalisationInfo, stop <-chan struct{}) addHashAndNumber(hash common.Hash, number uint) error addHeader(*types.Header) error addBlock(*types.Block) error @@ -35,7 +36,7 @@ type DisjointBlockSet interface { removeLowerBlocks(num uint) getBlock(common.Hash) *pendingBlock getBlocks() []*pendingBlock - getReadyDescendants(curr common.Hash, ready []*types.BlockData) []*types.BlockData + hasBlock(common.Hash) bool size() int } @@ -113,7 +114,7 @@ func newDisjointBlockSet(limit int) *disjointBlockSet { } } -func (s *disjointBlockSet) run(done <-chan struct{}) { +func (s *disjointBlockSet) run(finalisedCh <-chan *types.FinalisationInfo, stop <-chan struct{}) { ticker := time.NewTicker(clearBlocksInterval) defer ticker.Stop() @@ -121,7 +122,9 @@ func (s *disjointBlockSet) run(done <-chan struct{}) { select { case <-ticker.C: s.clearBlocks() - case <-done: + case finalisedInfo := <-finalisedCh: + s.removeLowerBlocks(finalisedInfo.Header.Number) + case <-stop: return } } @@ -270,12 +273,6 @@ func (s *disjointBlockSet) size() int { return len(s.blocks) } -func (s *disjointBlockSet) getChildren(hash common.Hash) map[common.Hash]struct{} { - s.RLock() - defer s.RUnlock() - return s.parentToChildren[hash] -} - func (s *disjointBlockSet) getBlock(hash common.Hash) *pendingBlock { s.RLock() defer s.RUnlock() @@ -286,32 +283,5 @@ func (s *disjointBlockSet) getBlocks() []*pendingBlock { s.RLock() defer s.RUnlock() - blocks := make([]*pendingBlock, len(s.blocks)) - i := 0 - for _, b := range s.blocks { - blocks[i] = b - i++ - } - return blocks -} - -// getReadyDescendants recursively checks for descendants that are ready to be processed -func (s *disjointBlockSet) getReadyDescendants(curr common.Hash, ready []*types.BlockData) []*types.BlockData { - children := s.getChildren(curr) - if len(children) == 0 { - return ready - } - - for c := range children { 
- b := s.getBlock(c) - if b == nil || b.header == nil || b.body == nil { - continue - } - - // if the entire block's data is known, it's ready! - ready = append(ready, b.toBlockData()) - ready = s.getReadyDescendants(c, ready) - } - - return ready + return maps.Values(s.blocks) } diff --git a/dot/sync/disjoint_block_set_integration_test.go b/dot/sync/disjoint_block_set_integration_test.go index 2497b8f290..ec6745ba56 100644 --- a/dot/sync/disjoint_block_set_integration_test.go +++ b/dot/sync/disjoint_block_set_integration_test.go @@ -113,102 +113,6 @@ func TestPendingBlock_toBlockData(t *testing.T) { require.Equal(t, expected, pb.toBlockData()) } -func TestDisjointBlockSet_getReadyDescendants(t *testing.T) { - s := newDisjointBlockSet(pendingBlocksLimit) - - // test that descendant chain gets returned by getReadyDescendants on block 1 being ready - header1 := &types.Header{ - Number: 1, - } - block1 := &types.Block{ - Header: *header1, - Body: types.Body{}, - } - - header2 := &types.Header{ - ParentHash: header1.Hash(), - Number: 2, - } - block2 := &types.Block{ - Header: *header2, - Body: types.Body{}, - } - s.addBlock(block2) - - header3 := &types.Header{ - ParentHash: header2.Hash(), - Number: 3, - } - block3 := &types.Block{ - Header: *header3, - Body: types.Body{}, - } - s.addBlock(block3) - - header2NotDescendant := &types.Header{ - ParentHash: common.Hash{0xff}, - Number: 2, - } - block2NotDescendant := &types.Block{ - Header: *header2NotDescendant, - Body: types.Body{}, - } - s.addBlock(block2NotDescendant) - - ready := []*types.BlockData{block1.ToBlockData()} - ready = s.getReadyDescendants(header1.Hash(), ready) - require.Equal(t, 3, len(ready)) - require.Equal(t, block1.ToBlockData(), ready[0]) - require.Equal(t, block2.ToBlockData(), ready[1]) - require.Equal(t, block3.ToBlockData(), ready[2]) -} - -func TestDisjointBlockSet_getReadyDescendants_blockNotComplete(t *testing.T) { - s := newDisjointBlockSet(pendingBlocksLimit) - - // test that descendant 
chain gets returned by getReadyDescendants on block 1 being ready - // the ready list should contain only block 1 and 2, as block 3 is incomplete (body is missing) - header1 := &types.Header{ - Number: 1, - } - block1 := &types.Block{ - Header: *header1, - Body: types.Body{}, - } - - header2 := &types.Header{ - ParentHash: header1.Hash(), - Number: 2, - } - block2 := &types.Block{ - Header: *header2, - Body: types.Body{}, - } - s.addBlock(block2) - - header3 := &types.Header{ - ParentHash: header2.Hash(), - Number: 3, - } - s.addHeader(header3) - - header2NotDescendant := &types.Header{ - ParentHash: common.Hash{0xff}, - Number: 2, - } - block2NotDescendant := &types.Block{ - Header: *header2NotDescendant, - Body: types.Body{}, - } - s.addBlock(block2NotDescendant) - - ready := []*types.BlockData{block1.ToBlockData()} - ready = s.getReadyDescendants(header1.Hash(), ready) - require.Equal(t, 2, len(ready)) - require.Equal(t, block1.ToBlockData(), ready[0]) - require.Equal(t, block2.ToBlockData(), ready[1]) -} - func TestDisjointBlockSet_ClearBlocks(t *testing.T) { s := newDisjointBlockSet(pendingBlocksLimit) diff --git a/dot/sync/errors.go b/dot/sync/errors.go index fa4b4ebcad..564c878422 100644 --- a/dot/sync/errors.go +++ b/dot/sync/errors.go @@ -20,18 +20,15 @@ var ( errRequestStartTooHigh = errors.New("request start number is higher than our best block") // chainSync errors - errEmptyBlockData = errors.New("empty block data") - errNilBlockData = errors.New("block data is nil") - errNilHeaderInResponse = errors.New("expected header, received none") - errNilBodyInResponse = errors.New("expected body, received none") - errNoPeers = errors.New("no peers to sync with") - errResponseIsNotChain = errors.New("block response does not form a chain") - errPeerOnInvalidFork = errors.New("peer is on an invalid fork") - errInvalidDirection = errors.New("direction of request does not match specified start and target") - errUnknownParent = errors.New("parent of first block in 
block response is unknown") - errUnknownBlockForJustification = errors.New("received justification for unknown block") - errFailedToGetParent = errors.New("failed to get parent header") - errStartAndEndMismatch = errors.New("request start and end hash are not on the same chain") - errFailedToGetDescendant = errors.New("failed to find descendant block") - errBadBlock = errors.New("known bad block") + errNoPeerViews = errors.New("unable to get target") + errNilBlockData = errors.New("block data is nil") + errNilHeaderInResponse = errors.New("expected header, received none") + errNilBodyInResponse = errors.New("expected body, received none") + errNilJustificationInResponse = errors.New("expected justification, received none") + errNoPeers = errors.New("no peers to sync with") + errPeerOnInvalidFork = errors.New("peer is on an invalid fork") + errFailedToGetParent = errors.New("failed to get parent header") + errStartAndEndMismatch = errors.New("request start and end hash are not on the same chain") + errFailedToGetDescendant = errors.New("failed to find descendant block") + errAlreadyInDisjointSet = errors.New("already in disjoint set") ) diff --git a/dot/sync/interfaces.go b/dot/sync/interfaces.go index db38d06e3e..311e50390e 100644 --- a/dot/sync/interfaces.go +++ b/dot/sync/interfaces.go @@ -75,6 +75,8 @@ type Network interface { // ReportPeer reports peer based on the peer behaviour. ReportPeer(change peerset.ReputationChange, p peer.ID) + + AllConnectedPeersID() []peer.ID } // Telemetry is the telemetry client to send telemetry messages. 
diff --git a/dot/sync/message.go b/dot/sync/message.go index 4c1ad50470..b5c5d49363 100644 --- a/dot/sync/message.go +++ b/dot/sync/message.go @@ -11,11 +11,6 @@ import ( "github.com/ChainSafe/gossamer/lib/common" ) -const ( - // maxResponseSize is maximum number of block data a BlockResponse message can contain - maxResponseSize = 128 -) - // CreateBlockResponse creates a block response message from a block request message func (s *Service) CreateBlockResponse(req *network.BlockRequestMessage) (*network.BlockResponseMessage, error) { switch req.Direction { @@ -30,13 +25,13 @@ func (s *Service) CreateBlockResponse(req *network.BlockRequestMessage) (*networ func (s *Service) handleAscendingRequest(req *network.BlockRequestMessage) (*network.BlockResponseMessage, error) { var ( - max uint = maxResponseSize + max uint = network.MaxBlocksInResponse startHash *common.Hash startNumber uint ) // determine maximum response size - if req.Max != nil && *req.Max < maxResponseSize { + if req.Max != nil && *req.Max < network.MaxBlocksInResponse { max = uint(*req.Max) } @@ -107,11 +102,11 @@ func (s *Service) handleDescendingRequest(req *network.BlockRequestMessage) (*ne var ( startHash *common.Hash startNumber uint - max uint = maxResponseSize + max uint = network.MaxBlocksInResponse ) // determine maximum response size - if req.Max != nil && *req.Max < maxResponseSize { + if req.Max != nil && *req.Max < network.MaxBlocksInResponse { max = uint(*req.Max) } diff --git a/dot/sync/message_integration_test.go b/dot/sync/message_integration_test.go index a030f1593a..7b4ff69529 100644 --- a/dot/sync/message_integration_test.go +++ b/dot/sync/message_integration_test.go @@ -48,7 +48,7 @@ func addTestBlocksToState(t *testing.T, depth uint, blockState BlockState) { func TestService_CreateBlockResponse_MaxSize(t *testing.T) { s := newTestSyncer(t) - addTestBlocksToState(t, maxResponseSize*2, s.blockState) + addTestBlocksToState(t, network.MaxBlocksInResponse*2, 
s.blockState) // test ascending start, err := variadic.NewUint32OrHash(1) @@ -63,11 +63,11 @@ func TestService_CreateBlockResponse_MaxSize(t *testing.T) { resp, err := s.CreateBlockResponse(req) require.NoError(t, err) - require.Equal(t, int(maxResponseSize), len(resp.BlockData)) + require.Equal(t, int(network.MaxBlocksInResponse), len(resp.BlockData)) require.Equal(t, uint(1), resp.BlockData[0].Number()) require.Equal(t, uint(128), resp.BlockData[127].Number()) - max := uint32(maxResponseSize + 100) + max := uint32(network.MaxBlocksInResponse + 100) req = &network.BlockRequestMessage{ RequestedData: 3, StartingBlock: *start, @@ -77,7 +77,7 @@ func TestService_CreateBlockResponse_MaxSize(t *testing.T) { resp, err = s.CreateBlockResponse(req) require.NoError(t, err) - require.Equal(t, int(maxResponseSize), len(resp.BlockData)) + require.Equal(t, int(network.MaxBlocksInResponse), len(resp.BlockData)) require.Equal(t, uint(1), resp.BlockData[0].Number()) require.Equal(t, uint(128), resp.BlockData[127].Number()) @@ -108,11 +108,11 @@ func TestService_CreateBlockResponse_MaxSize(t *testing.T) { resp, err = s.CreateBlockResponse(req) require.NoError(t, err) - require.Equal(t, int(maxResponseSize), len(resp.BlockData)) + require.Equal(t, int(network.MaxBlocksInResponse), len(resp.BlockData)) require.Equal(t, uint(128), resp.BlockData[0].Number()) require.Equal(t, uint(1), resp.BlockData[127].Number()) - max = uint32(maxResponseSize + 100) + max = uint32(network.MaxBlocksInResponse + 100) start, err = variadic.NewUint32OrHash(uint32(256)) require.NoError(t, err) @@ -125,7 +125,7 @@ func TestService_CreateBlockResponse_MaxSize(t *testing.T) { resp, err = s.CreateBlockResponse(req) require.NoError(t, err) - require.Equal(t, int(maxResponseSize), len(resp.BlockData)) + require.Equal(t, int(network.MaxBlocksInResponse), len(resp.BlockData)) require.Equal(t, uint(256), resp.BlockData[0].Number()) require.Equal(t, uint(129), resp.BlockData[127].Number()) @@ -146,7 +146,7 @@ func 
TestService_CreateBlockResponse_MaxSize(t *testing.T) { func TestService_CreateBlockResponse_StartHash(t *testing.T) { s := newTestSyncer(t) - addTestBlocksToState(t, uint(maxResponseSize*2), s.blockState) + addTestBlocksToState(t, uint(network.MaxBlocksInResponse*2), s.blockState) // test ascending with nil endBlockHash startHash, err := s.blockState.GetHashByNumber(1) @@ -164,7 +164,7 @@ func TestService_CreateBlockResponse_StartHash(t *testing.T) { resp, err := s.CreateBlockResponse(req) require.NoError(t, err) - require.Equal(t, int(maxResponseSize), len(resp.BlockData)) + require.Equal(t, int(network.MaxBlocksInResponse), len(resp.BlockData)) require.Equal(t, uint(1), resp.BlockData[0].Number()) require.Equal(t, uint(128), resp.BlockData[127].Number()) @@ -201,7 +201,7 @@ func TestService_CreateBlockResponse_StartHash(t *testing.T) { require.Equal(t, uint(16), resp.BlockData[0].Number()) require.Equal(t, uint(1), resp.BlockData[15].Number()) - // test descending with nil endBlockHash and start > maxResponseSize + // test descending with nil endBlockHash and start > network.MaxBlocksInResponse startHash, err = s.blockState.GetHashByNumber(256) require.NoError(t, err) @@ -217,7 +217,7 @@ func TestService_CreateBlockResponse_StartHash(t *testing.T) { resp, err = s.CreateBlockResponse(req) require.NoError(t, err) - require.Equal(t, int(maxResponseSize), len(resp.BlockData)) + require.Equal(t, int(network.MaxBlocksInResponse), len(resp.BlockData)) require.Equal(t, uint(256), resp.BlockData[0].Number()) require.Equal(t, uint(129), resp.BlockData[127].Number()) @@ -236,7 +236,7 @@ func TestService_CreateBlockResponse_StartHash(t *testing.T) { resp, err = s.CreateBlockResponse(req) require.NoError(t, err) - require.Equal(t, maxResponseSize, len(resp.BlockData)) + require.Equal(t, network.MaxBlocksInResponse, len(resp.BlockData)) require.Equal(t, uint(128), resp.BlockData[0].Number()) require.Equal(t, uint(1), resp.BlockData[127].Number()) } diff --git 
a/dot/sync/mock_chain_processor_test.go b/dot/sync/mock_chain_processor_test.go deleted file mode 100644 index fc6b9c1569..0000000000 --- a/dot/sync/mock_chain_processor_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/ChainSafe/gossamer/dot/sync (interfaces: ChainProcessor) - -// Package sync is a generated GoMock package. -package sync - -import ( - reflect "reflect" - - gomock "github.com/golang/mock/gomock" -) - -// MockChainProcessor is a mock of ChainProcessor interface. -type MockChainProcessor struct { - ctrl *gomock.Controller - recorder *MockChainProcessorMockRecorder -} - -// MockChainProcessorMockRecorder is the mock recorder for MockChainProcessor. -type MockChainProcessorMockRecorder struct { - mock *MockChainProcessor -} - -// NewMockChainProcessor creates a new mock instance. -func NewMockChainProcessor(ctrl *gomock.Controller) *MockChainProcessor { - mock := &MockChainProcessor{ctrl: ctrl} - mock.recorder = &MockChainProcessorMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockChainProcessor) EXPECT() *MockChainProcessorMockRecorder { - return m.recorder -} - -// processReadyBlocks mocks base method. -func (m *MockChainProcessor) processReadyBlocks() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "processReadyBlocks") -} - -// processReadyBlocks indicates an expected call of processReadyBlocks. -func (mr *MockChainProcessorMockRecorder) processReadyBlocks() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "processReadyBlocks", reflect.TypeOf((*MockChainProcessor)(nil).processReadyBlocks)) -} - -// stop mocks base method. -func (m *MockChainProcessor) stop() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "stop") -} - -// stop indicates an expected call of stop. 
-func (mr *MockChainProcessorMockRecorder) stop() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "stop", reflect.TypeOf((*MockChainProcessor)(nil).stop)) -} diff --git a/dot/sync/mock_chain_sync_test.go b/dot/sync/mock_chain_sync_test.go index f89250c252..d7ce880044 100644 --- a/dot/sync/mock_chain_sync_test.go +++ b/dot/sync/mock_chain_sync_test.go @@ -7,94 +7,11 @@ package sync import ( reflect "reflect" - types "github.com/ChainSafe/gossamer/dot/types" common "github.com/ChainSafe/gossamer/lib/common" gomock "github.com/golang/mock/gomock" peer "github.com/libp2p/go-libp2p/core/peer" ) -// MockworkHandler is a mock of workHandler interface. -type MockworkHandler struct { - ctrl *gomock.Controller - recorder *MockworkHandlerMockRecorder -} - -// MockworkHandlerMockRecorder is the mock recorder for MockworkHandler. -type MockworkHandlerMockRecorder struct { - mock *MockworkHandler -} - -// NewMockworkHandler creates a new mock instance. -func NewMockworkHandler(ctrl *gomock.Controller) *MockworkHandler { - mock := &MockworkHandler{ctrl: ctrl} - mock.recorder = &MockworkHandlerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockworkHandler) EXPECT() *MockworkHandlerMockRecorder { - return m.recorder -} - -// handleNewPeerState mocks base method. -func (m *MockworkHandler) handleNewPeerState(arg0 *peerState) (*worker, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "handleNewPeerState", arg0) - ret0, _ := ret[0].(*worker) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// handleNewPeerState indicates an expected call of handleNewPeerState. 
-func (mr *MockworkHandlerMockRecorder) handleNewPeerState(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "handleNewPeerState", reflect.TypeOf((*MockworkHandler)(nil).handleNewPeerState), arg0) -} - -// handleTick mocks base method. -func (m *MockworkHandler) handleTick() ([]*worker, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "handleTick") - ret0, _ := ret[0].([]*worker) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// handleTick indicates an expected call of handleTick. -func (mr *MockworkHandlerMockRecorder) handleTick() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "handleTick", reflect.TypeOf((*MockworkHandler)(nil).handleTick)) -} - -// handleWorkerResult mocks base method. -func (m *MockworkHandler) handleWorkerResult(w *worker) (*worker, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "handleWorkerResult", w) - ret0, _ := ret[0].(*worker) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// handleWorkerResult indicates an expected call of handleWorkerResult. -func (mr *MockworkHandlerMockRecorder) handleWorkerResult(w interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "handleWorkerResult", reflect.TypeOf((*MockworkHandler)(nil).handleWorkerResult), w) -} - -// hasCurrentWorker mocks base method. -func (m *MockworkHandler) hasCurrentWorker(arg0 *worker, arg1 map[uint64]*worker) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "hasCurrentWorker", arg0, arg1) - ret0, _ := ret[0].(bool) - return ret0 -} - -// hasCurrentWorker indicates an expected call of hasCurrentWorker. 
-func (mr *MockworkHandlerMockRecorder) hasCurrentWorker(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "hasCurrentWorker", reflect.TypeOf((*MockworkHandler)(nil).hasCurrentWorker), arg0, arg1) -} - // MockChainSync is a mock of ChainSync interface. type MockChainSync struct { ctrl *gomock.Controller @@ -133,28 +50,40 @@ func (mr *MockChainSyncMockRecorder) getHighestBlock() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getHighestBlock", reflect.TypeOf((*MockChainSync)(nil).getHighestBlock)) } -// setBlockAnnounce mocks base method. -func (m *MockChainSync) setBlockAnnounce(from peer.ID, header *types.Header) error { +// getSyncMode mocks base method. +func (m *MockChainSync) getSyncMode() chainSyncState { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "setBlockAnnounce", from, header) - ret0, _ := ret[0].(error) + ret := m.ctrl.Call(m, "getSyncMode") + ret0, _ := ret[0].(chainSyncState) return ret0 } -// setBlockAnnounce indicates an expected call of setBlockAnnounce. -func (mr *MockChainSyncMockRecorder) setBlockAnnounce(from, header interface{}) *gomock.Call { +// getSyncMode indicates an expected call of getSyncMode. +func (mr *MockChainSyncMockRecorder) getSyncMode() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "setBlockAnnounce", reflect.TypeOf((*MockChainSync)(nil).setBlockAnnounce), from, header) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getSyncMode", reflect.TypeOf((*MockChainSync)(nil).getSyncMode)) } -// setPeerHead mocks base method. -func (m *MockChainSync) setPeerHead(p peer.ID, hash common.Hash, number uint) error { +// onBlockAnnounce mocks base method. 
+func (m *MockChainSync) onBlockAnnounce(arg0 announcedBlock) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "setPeerHead", p, hash, number) + ret := m.ctrl.Call(m, "onBlockAnnounce", arg0) ret0, _ := ret[0].(error) return ret0 } +// onBlockAnnounce indicates an expected call of onBlockAnnounce. +func (mr *MockChainSyncMockRecorder) onBlockAnnounce(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "onBlockAnnounce", reflect.TypeOf((*MockChainSync)(nil).onBlockAnnounce), arg0) +} + +// setPeerHead mocks base method. +func (m *MockChainSync) setPeerHead(p peer.ID, hash common.Hash, number uint) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "setPeerHead", p, hash, number) +} + // setPeerHead indicates an expected call of setPeerHead. func (mr *MockChainSyncMockRecorder) setPeerHead(p, hash, number interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() @@ -184,17 +113,3 @@ func (mr *MockChainSyncMockRecorder) stop() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "stop", reflect.TypeOf((*MockChainSync)(nil).stop)) } - -// syncState mocks base method. -func (m *MockChainSync) syncState() chainSyncState { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "syncState") - ret0, _ := ret[0].(chainSyncState) - return ret0 -} - -// syncState indicates an expected call of syncState. 
-func (mr *MockChainSyncMockRecorder) syncState() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "syncState", reflect.TypeOf((*MockChainSync)(nil).syncState)) -} diff --git a/dot/sync/mock_disjoint_block_set_test.go b/dot/sync/mock_disjoint_block_set_test.go index 07b5578dd9..d26ef0644a 100644 --- a/dot/sync/mock_disjoint_block_set_test.go +++ b/dot/sync/mock_disjoint_block_set_test.go @@ -119,18 +119,18 @@ func (mr *MockDisjointBlockSetMockRecorder) getBlocks() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getBlocks", reflect.TypeOf((*MockDisjointBlockSet)(nil).getBlocks)) } -// getReadyDescendants mocks base method. -func (m *MockDisjointBlockSet) getReadyDescendants(arg0 common.Hash, arg1 []*types.BlockData) []*types.BlockData { +// hasBlock mocks base method. +func (m *MockDisjointBlockSet) hasBlock(arg0 common.Hash) bool { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getReadyDescendants", arg0, arg1) - ret0, _ := ret[0].([]*types.BlockData) + ret := m.ctrl.Call(m, "hasBlock", arg0) + ret0, _ := ret[0].(bool) return ret0 } -// getReadyDescendants indicates an expected call of getReadyDescendants. -func (mr *MockDisjointBlockSetMockRecorder) getReadyDescendants(arg0, arg1 interface{}) *gomock.Call { +// hasBlock indicates an expected call of hasBlock. +func (mr *MockDisjointBlockSetMockRecorder) hasBlock(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getReadyDescendants", reflect.TypeOf((*MockDisjointBlockSet)(nil).getReadyDescendants), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "hasBlock", reflect.TypeOf((*MockDisjointBlockSet)(nil).hasBlock), arg0) } // removeBlock mocks base method. @@ -158,15 +158,15 @@ func (mr *MockDisjointBlockSetMockRecorder) removeLowerBlocks(arg0 interface{}) } // run mocks base method. 
-func (m *MockDisjointBlockSet) run(arg0 <-chan struct{}) { +func (m *MockDisjointBlockSet) run(arg0 <-chan *types.FinalisationInfo, arg1 <-chan struct{}) { m.ctrl.T.Helper() - m.ctrl.Call(m, "run", arg0) + m.ctrl.Call(m, "run", arg0, arg1) } // run indicates an expected call of run. -func (mr *MockDisjointBlockSetMockRecorder) run(arg0 interface{}) *gomock.Call { +func (mr *MockDisjointBlockSetMockRecorder) run(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "run", reflect.TypeOf((*MockDisjointBlockSet)(nil).run), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "run", reflect.TypeOf((*MockDisjointBlockSet)(nil).run), arg0, arg1) } // size mocks base method. diff --git a/dot/sync/mock_req_res.go b/dot/sync/mock_request.go similarity index 100% rename from dot/sync/mock_req_res.go rename to dot/sync/mock_request.go diff --git a/dot/sync/mocks_generate_test.go b/dot/sync/mocks_generate_test.go index 7d4e8cb064..e970742556 100644 --- a/dot/sync/mocks_generate_test.go +++ b/dot/sync/mocks_generate_test.go @@ -6,7 +6,6 @@ package sync //go:generate mockgen -destination=mocks_test.go -package=$GOPACKAGE . BlockState,StorageState,TransactionState,BabeVerifier,FinalityGadget,BlockImportHandler,Network //go:generate mockgen -destination=mock_telemetry_test.go -package $GOPACKAGE . Telemetry //go:generate mockgen -destination=mock_runtime_test.go -package $GOPACKAGE github.com/ChainSafe/gossamer/lib/runtime Instance -//go:generate mockgen -destination=mock_req_res.go -package $GOPACKAGE github.com/ChainSafe/gossamer/dot/network RequestMaker -//go:generate mockgen -destination=mock_chain_processor_test.go -package=$GOPACKAGE . ChainProcessor -//go:generate mockgen -destination=mock_chain_sync_test.go -package $GOPACKAGE -source chain_sync.go . 
ChainSync,workHandler +//go:generate mockgen -destination=mock_chain_sync_test.go -package $GOPACKAGE -source chain_sync.go . ChainSync //go:generate mockgen -destination=mock_disjoint_block_set_test.go -package=$GOPACKAGE . DisjointBlockSet +//go:generate mockgen -destination=mock_request.go -package $GOPACKAGE github.com/ChainSafe/gossamer/dot/network RequestMaker diff --git a/dot/sync/mocks_test.go b/dot/sync/mocks_test.go index 57a85eb954..971e559359 100644 --- a/dot/sync/mocks_test.go +++ b/dot/sync/mocks_test.go @@ -608,6 +608,20 @@ func (m *MockNetwork) EXPECT() *MockNetworkMockRecorder { return m.recorder } +// AllConnectedPeersID mocks base method. +func (m *MockNetwork) AllConnectedPeersID() []peer.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AllConnectedPeersID") + ret0, _ := ret[0].([]peer.ID) + return ret0 +} + +// AllConnectedPeersID indicates an expected call of AllConnectedPeersID. +func (mr *MockNetworkMockRecorder) AllConnectedPeersID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllConnectedPeersID", reflect.TypeOf((*MockNetwork)(nil).AllConnectedPeersID)) +} + // Peers mocks base method. 
func (m *MockNetwork) Peers() []common.PeerInfo { m.ctrl.T.Helper() diff --git a/dot/sync/syncer.go b/dot/sync/syncer.go index 8cb53ef044..13374f3b8a 100644 --- a/dot/sync/syncer.go +++ b/dot/sync/syncer.go @@ -4,10 +4,14 @@ package sync import ( + "errors" + "fmt" "time" "github.com/ChainSafe/gossamer/dot/network" + "github.com/ChainSafe/gossamer/dot/peerset" "github.com/ChainSafe/gossamer/dot/types" + "github.com/cockroachdb/pebble" "github.com/ChainSafe/gossamer/internal/log" "github.com/libp2p/go-libp2p/core/peer" @@ -17,10 +21,9 @@ var logger = log.NewFromGlobal(log.AddContext("pkg", "sync")) // Service deals with chain syncing by sending block request messages and watching for responses. type Service struct { - blockState BlockState - chainSync ChainSync - chainProcessor ChainProcessor - network Network + blockState BlockState + chainSync ChainSync + network Network } // Config is the configuration for the sync Service. 
@@ -37,31 +40,22 @@ type Config struct { SlotDuration time.Duration Telemetry Telemetry BadBlocks []string + RequestMaker network.RequestMaker } // NewService returns a new *sync.Service -func NewService(cfg *Config, blockReqRes network.RequestMaker) (*Service, error) { +func NewService(cfg *Config) (*Service, error) { logger.Patch(log.SetLevel(cfg.LogLvl)) - readyBlocks := newBlockQueue(maxResponseSize * 30) pendingBlocks := newDisjointBlockSet(pendingBlocksLimit) csCfg := chainSyncConfig{ - bs: cfg.BlockState, - net: cfg.Network, - readyBlocks: readyBlocks, - pendingBlocks: pendingBlocks, - minPeers: cfg.MinPeers, - maxPeers: cfg.MaxPeers, - slotDuration: cfg.SlotDuration, - } - chainSync := newChainSync(csCfg, blockReqRes) - - cpCfg := chainProcessorConfig{ - readyBlocks: readyBlocks, + bs: cfg.BlockState, + net: cfg.Network, pendingBlocks: pendingBlocks, - syncer: chainSync, - blockState: cfg.BlockState, + minPeers: cfg.MinPeers, + maxPeers: cfg.MaxPeers, + slotDuration: cfg.SlotDuration, storageState: cfg.StorageState, transactionState: cfg.TransactionState, babeVerifier: cfg.BabeVerifier, @@ -69,47 +63,109 @@ func NewService(cfg *Config, blockReqRes network.RequestMaker) (*Service, error) blockImportHandler: cfg.BlockImportHandler, telemetry: cfg.Telemetry, badBlocks: cfg.BadBlocks, + requestMaker: cfg.RequestMaker, } - chainProcessor := newChainProcessor(cpCfg) + chainSync := newChainSync(csCfg) return &Service{ - blockState: cfg.BlockState, - chainSync: chainSync, - chainProcessor: chainProcessor, - network: cfg.Network, + blockState: cfg.BlockState, + chainSync: chainSync, + network: cfg.Network, }, nil } // Start begins the chainSync and chainProcessor modules. 
It begins syncing in bootstrap mode func (s *Service) Start() error { go s.chainSync.start() - go s.chainProcessor.processReadyBlocks() return nil } // Stop stops the chainSync and chainProcessor modules func (s *Service) Stop() error { s.chainSync.stop() - s.chainProcessor.stop() return nil } // HandleBlockAnnounceHandshake notifies the `chainSync` module that // we have received a BlockAnnounceHandshake from the given peer. func (s *Service) HandleBlockAnnounceHandshake(from peer.ID, msg *network.BlockAnnounceHandshake) error { - return s.chainSync.setPeerHead(from, msg.BestBlockHash, uint(msg.BestBlockNumber)) + s.chainSync.setPeerHead(from, msg.BestBlockHash, uint(msg.BestBlockNumber)) + return nil } // HandleBlockAnnounce notifies the `chainSync` module that we have received a block announcement from the given peer. func (s *Service) HandleBlockAnnounce(from peer.ID, msg *network.BlockAnnounceMessage) error { logger.Debug("received BlockAnnounceMessage") - header := types.NewHeader(msg.ParentHash, msg.StateRoot, msg.ExtrinsicsRoot, msg.Number, msg.Digest) - return s.chainSync.setBlockAnnounce(from, header) + blockAnnounceHeader := types.NewHeader(msg.ParentHash, msg.StateRoot, msg.ExtrinsicsRoot, msg.Number, msg.Digest) + blockAnnounceHeaderHash := blockAnnounceHeader.Hash() + + // if the peer reports a lower or equal best block number than us, + // check if they are on a fork or not + bestBlockHeader, err := s.blockState.BestBlockHeader() + if err != nil { + return fmt.Errorf("best block header: %w", err) + } + + if blockAnnounceHeader.Number <= bestBlockHeader.Number { + // check if our block hash for that number is the same, if so, do nothing + // as we already have that block + ourHash, err := s.blockState.GetHashByNumber(blockAnnounceHeader.Number) + if err != nil && !errors.Is(err, pebble.ErrNotFound) { + return fmt.Errorf("get block hash by number: %w", err) + } + + if ourHash == blockAnnounceHeaderHash { + return nil + } + + // check if their best 
block is on an invalid chain, if it is, + // potentially downscore them + // for now, we can remove them from the syncing peers set + fin, err := s.blockState.GetHighestFinalisedHeader() + if err != nil { + return fmt.Errorf("get highest finalised header: %w", err) + } + + // their block hash doesn't match ours for that number (ie. they are on a different + // chain), and also the highest finalised block is higher than that number. + // thus the peer is on an invalid chain + if fin.Number >= blockAnnounceHeader.Number && msg.BestBlock { + // TODO: downscore this peer, or temporarily don't sync from them? (#1399) + // perhaps we need another field in `peerState` to mark whether the state is valid or not + s.network.ReportPeer(peerset.ReputationChange{ + Value: peerset.BadBlockAnnouncementValue, + Reason: peerset.BadBlockAnnouncementReason, + }, from) + return fmt.Errorf("%w: for peer %s and block number %d", + errPeerOnInvalidFork, from, blockAnnounceHeader.Number) + } + + // peer is on a fork, check if we have processed the fork already or not + // ie. is their block written to our db? + has, err := s.blockState.HasHeader(blockAnnounceHeaderHash) + if err != nil { + return fmt.Errorf("while checking if header exists: %w", err) + } + + // if so, do nothing, as we already have their fork + if has { + return nil + } + } + + // we assume that if a peer sends us a block announce for a certain block, + // that it also has the chain up until and including that block. + // this may not be a valid assumption, but perhaps we can assume that + // it is likely they will receive this block and its ancestors before us.
+ return s.chainSync.onBlockAnnounce(announcedBlock{ + who: from, + header: blockAnnounceHeader, + }) } // IsSynced exposes the synced state func (s *Service) IsSynced() bool { - return s.chainSync.syncState() == tip + return s.chainSync.getSyncMode() == tip } // HighestBlock gets the highest known block number diff --git a/dot/sync/syncer_integration_test.go b/dot/sync/syncer_integration_test.go index 4ad929948d..92d4f12970 100644 --- a/dot/sync/syncer_integration_test.go +++ b/dot/sync/syncer_integration_test.go @@ -117,8 +117,8 @@ func newTestSyncer(t *testing.T) *Service { cfg.FinalityGadget = mockFinalityGadget cfg.Network = NewMockNetwork(ctrl) cfg.Telemetry = mockTelemetryClient - mockReqRes := NewMockRequestMaker(ctrl) - syncer, err := NewService(cfg, mockReqRes) + cfg.RequestMaker = NewMockRequestMaker(ctrl) + syncer, err := NewService(cfg) require.NoError(t, err) return syncer } diff --git a/dot/sync/syncer_test.go b/dot/sync/syncer_test.go index 750d8886e1..5e0573f805 100644 --- a/dot/sync/syncer_test.go +++ b/dot/sync/syncer_test.go @@ -9,12 +9,14 @@ import ( "testing" "github.com/ChainSafe/gossamer/dot/network" + "github.com/ChainSafe/gossamer/dot/peerset" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/pkg/scale" "github.com/golang/mock/gomock" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewService(t *testing.T) { @@ -46,9 +48,8 @@ func TestNewService(t *testing.T) { ctrl := gomock.NewController(t) config := tt.cfgBuilder(ctrl) - mockReqRes := NewMockRequestMaker(ctrl) - got, err := NewService(config, mockReqRes) + got, err := NewService(config) if tt.err != nil { assert.EqualError(t, err, tt.err.Error()) } else { @@ -64,133 +65,238 @@ func 
TestNewService(t *testing.T) { func TestService_HandleBlockAnnounce(t *testing.T) { t.Parallel() - ctrl := gomock.NewController(t) + errTest := errors.New("test error") + const somePeer = peer.ID("abc") - type fields struct { - chainSync ChainSync - } - type args struct { - from peer.ID - msg *network.BlockAnnounceMessage - } - tests := []struct { - name string - fields fields - args args - wantErr bool + block1AnnounceHeader := types.NewHeader(common.Hash{}, common.Hash{}, + common.Hash{}, 1, scale.VaryingDataTypeSlice{}) + block2AnnounceHeader := types.NewHeader(common.Hash{}, common.Hash{}, + common.Hash{}, 2, scale.VaryingDataTypeSlice{}) + + testCases := map[string]struct { + serviceBuilder func(ctrl *gomock.Controller) *Service + peerID peer.ID + blockAnnounceHeader *types.Header + errWrapped error + errMessage string }{ - { - name: "working_example", - fields: fields{ - chainSync: newMockChainSync(ctrl), - }, - args: args{ - from: peer.ID("1"), - msg: &network.BlockAnnounceMessage{ - ParentHash: common.Hash{}, - Number: 1, - StateRoot: common.Hash{}, - ExtrinsicsRoot: common.Hash{}, - Digest: scale.VaryingDataTypeSlice{}, - BestBlock: false, - }, + "best_block_header_error": { + serviceBuilder: func(ctrl *gomock.Controller) *Service { + blockState := NewMockBlockState(ctrl) + blockState.EXPECT().BestBlockHeader().Return(nil, errTest) + return &Service{ + blockState: blockState, + } }, - wantErr: false, + peerID: somePeer, + blockAnnounceHeader: block1AnnounceHeader, + errWrapped: errTest, + errMessage: "best block header: test error", }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - s := &Service{ - chainSync: tt.fields.chainSync, - } - if err := s.HandleBlockAnnounce(tt.args.from, tt.args.msg); (err != nil) != tt.wantErr { - t.Errorf("HandleBlockAnnounce() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func newMockChainSync(ctrl *gomock.Controller) ChainSync { - mock := 
NewMockChainSync(ctrl) - header := types.NewHeader(common.Hash{}, common.Hash{}, common.Hash{}, 1, - scale.VaryingDataTypeSlice{}) - - mock.EXPECT().setBlockAnnounce(peer.ID("1"), header).Return(nil).AnyTimes() - mock.EXPECT().setPeerHead(peer.ID("1"), common.Hash{}, uint(0)).Return(nil).AnyTimes() - mock.EXPECT().syncState().Return(bootstrap).AnyTimes() - mock.EXPECT().start().AnyTimes() - mock.EXPECT().stop().AnyTimes() - mock.EXPECT().getHighestBlock().Return(uint(2), nil).AnyTimes() - - return mock -} - -func Test_Service_HandleBlockAnnounceHandshake(t *testing.T) { - t.Parallel() + "number_smaller_than_best_block_number_get_hash_by_number_error": { + serviceBuilder: func(ctrl *gomock.Controller) *Service { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 2} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + blockState.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{}, errTest) - errTest := errors.New("test error") + return &Service{ + blockState: blockState, + } + }, + peerID: somePeer, + blockAnnounceHeader: block1AnnounceHeader, + errWrapped: errTest, + errMessage: "get block hash by number: test error", + }, + "number_smaller_than_best_block_number_and_same_hash": { + serviceBuilder: func(ctrl *gomock.Controller) *Service { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 2} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + blockState.EXPECT().GetHashByNumber(uint(1)).Return(block1AnnounceHeader.Hash(), nil) + return &Service{ + blockState: blockState, + } + }, + peerID: somePeer, + blockAnnounceHeader: block1AnnounceHeader, + }, + "number_smaller_than_best_block_number_get_highest_finalised_header_error": { + serviceBuilder: func(ctrl *gomock.Controller) *Service { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 2} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + 
blockState.EXPECT().GetHashByNumber(uint(1)).Return(common.Hash{2}, nil) + blockState.EXPECT().GetHighestFinalisedHeader().Return(nil, errTest) + return &Service{ + blockState: blockState, + } + }, + peerID: somePeer, + blockAnnounceHeader: block1AnnounceHeader, + errWrapped: errTest, + errMessage: "get highest finalised header: test error", + }, + "number_smaller_than_best_block_announced_number_equaks_finalised_number": { + serviceBuilder: func(ctrl *gomock.Controller) *Service { + blockState := NewMockBlockState(ctrl) - testCases := map[string]struct { - serviceBuilder func(ctrl *gomock.Controller) Service - from peer.ID - message *network.BlockAnnounceHandshake - errWrapped error - errMessage string - }{ - "success": { - serviceBuilder: func(ctrl *gomock.Controller) Service { - chainSync := NewMockChainSync(ctrl) - chainSync.EXPECT().setPeerHead(peer.ID("abc"), common.Hash{1}, uint(2)). - Return(nil) - return Service{ - chainSync: chainSync, + bestBlockHeader := &types.Header{Number: 2} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + blockState.EXPECT().GetHashByNumber(uint(1)). 
+ Return(common.Hash{2}, nil) // other hash than someHash + finalisedBlockHeader := &types.Header{Number: 1} + blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) + network := NewMockNetwork(ctrl) + network.EXPECT().ReportPeer(peerset.ReputationChange{ + Value: peerset.BadBlockAnnouncementValue, + Reason: peerset.BadBlockAnnouncementReason, + }, somePeer) + return &Service{ + blockState: blockState, + network: network, } }, - from: peer.ID("abc"), - message: &network.BlockAnnounceHandshake{ - BestBlockHash: common.Hash{1}, - BestBlockNumber: 2, + peerID: somePeer, + blockAnnounceHeader: block1AnnounceHeader, + errWrapped: errPeerOnInvalidFork, + errMessage: "peer is on an invalid fork: for peer ZiCa and block number 1", + }, + "number_smaller_than_best_block_number_and_finalised_number_bigger_than_number": { + serviceBuilder: func(ctrl *gomock.Controller) *Service { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 2} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + blockState.EXPECT().GetHashByNumber(uint(1)). + Return(common.Hash{2}, nil) // other hash than someHash + finalisedBlockHeader := &types.Header{Number: 2} + blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) + network := NewMockNetwork(ctrl) + network.EXPECT().ReportPeer(peerset.ReputationChange{ + Value: peerset.BadBlockAnnouncementValue, + Reason: peerset.BadBlockAnnouncementReason, + }, somePeer) + return &Service{ + blockState: blockState, + network: network, + } }, + peerID: somePeer, + blockAnnounceHeader: block1AnnounceHeader, + errWrapped: errPeerOnInvalidFork, + errMessage: "peer is on an invalid fork: for peer ZiCa and block number 1", }, - "failure": { - serviceBuilder: func(ctrl *gomock.Controller) Service { - chainSync := NewMockChainSync(ctrl) - chainSync.EXPECT().setPeerHead(peer.ID("abc"), common.Hash{1}, uint(2)). 
- Return(errTest) - return Service{ - chainSync: chainSync, + "number_smaller_than_best_block_number_and_" + + "finalised_number_smaller_than_number_and_" + + "has_header_error": { + serviceBuilder: func(ctrl *gomock.Controller) *Service { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 3} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + blockState.EXPECT().GetHashByNumber(uint(2)). + Return(common.Hash{5, 1, 2}, nil) // other hash than block2AnnounceHeader hash + finalisedBlockHeader := &types.Header{Number: 1} + blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) + blockState.EXPECT().HasHeader(block2AnnounceHeader.Hash()).Return(false, errTest) + return &Service{ + blockState: blockState, } }, - from: peer.ID("abc"), - message: &network.BlockAnnounceHandshake{ - BestBlockHash: common.Hash{1}, - BestBlockNumber: 2, + peerID: somePeer, + blockAnnounceHeader: block2AnnounceHeader, + errWrapped: errTest, + errMessage: "while checking if header exists: test error", + }, + "number_smaller_than_best_block_number_and_" + + "finalised_number_smaller_than_number_and_" + + "has_the_hash": { + serviceBuilder: func(ctrl *gomock.Controller) *Service { + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 3} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + blockState.EXPECT().GetHashByNumber(uint(2)). 
+ Return(common.Hash{2}, nil) // other hash than someHash + finalisedBlockHeader := &types.Header{Number: 1} + blockState.EXPECT().GetHighestFinalisedHeader().Return(finalisedBlockHeader, nil) + blockState.EXPECT().HasHeader(block2AnnounceHeader.Hash()).Return(true, nil) + return &Service{ + blockState: blockState, + } }, - errWrapped: errTest, - errMessage: "test error", + peerID: somePeer, + blockAnnounceHeader: block2AnnounceHeader, + }, + "number_bigger_than_best_block_number_added_in_disjoint_set_with_success": { + serviceBuilder: func(ctrl *gomock.Controller) *Service { + + blockState := NewMockBlockState(ctrl) + bestBlockHeader := &types.Header{Number: 1} + blockState.EXPECT().BestBlockHeader().Return(bestBlockHeader, nil) + chainSyncMock := NewMockChainSync(ctrl) + + expectedAnnouncedBlock := announcedBlock{ + who: somePeer, + header: block2AnnounceHeader, + } + + chainSyncMock.EXPECT().onBlockAnnounce(expectedAnnouncedBlock).Return(nil) + + return &Service{ + blockState: blockState, + chainSync: chainSyncMock, + } + }, + peerID: somePeer, + blockAnnounceHeader: block2AnnounceHeader, }, } - for name, testCase := range testCases { - testCase := testCase + for name, tt := range testCases { + tt := tt t.Run(name, func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) - service := testCase.serviceBuilder(ctrl) - - err := service.HandleBlockAnnounceHandshake(testCase.from, testCase.message) + service := tt.serviceBuilder(ctrl) - assert.ErrorIs(t, err, testCase.errWrapped) - if testCase.errWrapped != nil { - assert.EqualError(t, err, testCase.errMessage) + blockAnnounceMessage := &network.BlockAnnounceMessage{ + ParentHash: tt.blockAnnounceHeader.ParentHash, + Number: tt.blockAnnounceHeader.Number, + StateRoot: tt.blockAnnounceHeader.StateRoot, + ExtrinsicsRoot: tt.blockAnnounceHeader.ExtrinsicsRoot, + Digest: tt.blockAnnounceHeader.Digest, + BestBlock: true, + } + err := service.HandleBlockAnnounce(tt.peerID, blockAnnounceMessage) + assert.ErrorIs(t, 
err, tt.errWrapped) + if tt.errWrapped != nil { + assert.EqualError(t, err, tt.errMessage) } }) } } +func Test_Service_HandleBlockAnnounceHandshake(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + chainSync := NewMockChainSync(ctrl) + chainSync.EXPECT().setPeerHead(peer.ID("peer"), common.Hash{1}, uint(2)) + + service := Service{ + chainSync: chainSync, + } + + message := &network.BlockAnnounceHandshake{ + BestBlockHash: common.Hash{1}, + BestBlockNumber: 2, + } + + err := service.HandleBlockAnnounceHandshake(peer.ID("peer"), message) + require.Nil(t, err) +} + func TestService_IsSynced(t *testing.T) { t.Parallel() @@ -201,7 +307,7 @@ func TestService_IsSynced(t *testing.T) { "tip": { serviceBuilder: func(ctrl *gomock.Controller) Service { chainSync := NewMockChainSync(ctrl) - chainSync.EXPECT().syncState().Return(tip) + chainSync.EXPECT().getSyncMode().Return(tip) return Service{ chainSync: chainSync, } @@ -211,7 +317,7 @@ func TestService_IsSynced(t *testing.T) { "not_tip": { serviceBuilder: func(ctrl *gomock.Controller) Service { chainSync := NewMockChainSync(ctrl) - chainSync.EXPECT().syncState().Return(bootstrap) + chainSync.EXPECT().getSyncMode().Return(bootstrap) return Service{ chainSync: chainSync, } @@ -246,15 +352,8 @@ func TestService_Start(t *testing.T) { allCalled.Done() }) - chainProcessor := NewMockChainProcessor(ctrl) - allCalled.Add(1) - chainProcessor.EXPECT().processReadyBlocks().DoAndReturn(func() { - allCalled.Done() - }) - service := Service{ - chainSync: chainSync, - chainProcessor: chainProcessor, + chainSync: chainSync, } err := service.Start() @@ -268,12 +367,8 @@ func TestService_Stop(t *testing.T) { chainSync := NewMockChainSync(ctrl) chainSync.EXPECT().stop() - chainProcessor := NewMockChainProcessor(ctrl) - chainProcessor.EXPECT().stop() - service := &Service{ - chainSync: chainSync, - chainProcessor: chainProcessor, + chainSync: chainSync, } err := service.Stop() diff --git a/dot/sync/tip_syncer.go 
b/dot/sync/tip_syncer.go deleted file mode 100644 index 00d2318cff..0000000000 --- a/dot/sync/tip_syncer.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "errors" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" -) - -var _ workHandler = &tipSyncer{} - -type handleReadyBlockFunc func(*types.BlockData) - -// tipSyncer handles workers when syncing at the tip of the chain -type tipSyncer struct { - blockState BlockState - pendingBlocks DisjointBlockSet - readyBlocks *blockQueue - handleReadyBlock handleReadyBlockFunc -} - -func newTipSyncer(blockState BlockState, pendingBlocks DisjointBlockSet, readyBlocks *blockQueue, - handleReadyBlock handleReadyBlockFunc) *tipSyncer { - return &tipSyncer{ - blockState: blockState, - pendingBlocks: pendingBlocks, - readyBlocks: readyBlocks, - handleReadyBlock: handleReadyBlock, - } -} - -func (s *tipSyncer) handleNewPeerState(ps *peerState) (*worker, error) { - fin, err := s.blockState.GetHighestFinalisedHeader() - if err != nil { - return nil, err - } - - if ps.number <= fin.Number { - return nil, nil //nolint:nilnil - } - - return &worker{ - startHash: ps.hash, - startNumber: uintPtr(ps.number), - targetHash: ps.hash, - targetNumber: uintPtr(ps.number), - requestData: bootstrapRequestData, - }, nil -} - -//nolint:nilnil -func (s *tipSyncer) handleWorkerResult(res *worker) ( - workerToRetry *worker, err error) { - if res.err == nil { - return nil, nil - } - - if errors.Is(res.err.err, errUnknownParent) { - // handleTick will handle the errUnknownParent case - return nil, nil - } - - fin, err := s.blockState.GetHighestFinalisedHeader() - if err != nil { - return nil, err - } - - // don't retry if we're requesting blocks lower than finalised - switch res.direction { - case network.Ascending: - if 
*res.targetNumber <= fin.Number { - return nil, nil - } - - // if start is lower than finalised, increase it to finalised+1 - if *res.startNumber <= fin.Number { - *res.startNumber = fin.Number + 1 - res.startHash = common.Hash{} - } - case network.Descending: - if *res.startNumber <= fin.Number { - return nil, nil - } - - // if target is lower than finalised, increase it to finalised+1 - if *res.targetNumber <= fin.Number { - *res.targetNumber = fin.Number + 1 - res.targetHash = common.Hash{} - } - } - - return &worker{ - startHash: res.startHash, - startNumber: res.startNumber, - targetHash: res.targetHash, - targetNumber: res.targetNumber, - direction: res.direction, - requestData: res.requestData, - }, nil -} - -func (*tipSyncer) hasCurrentWorker(w *worker, workers map[uint64]*worker) bool { - if w == nil || w.startNumber == nil || w.targetNumber == nil { - return true - } - - for _, curr := range workers { - if w.direction != curr.direction || w.requestData != curr.requestData { - continue - } - - switch w.direction { - case network.Ascending: - if *w.targetNumber > *curr.targetNumber || - *w.startNumber < *curr.startNumber { - continue - } - case network.Descending: - if *w.targetNumber < *curr.targetNumber || - *w.startNumber > *curr.startNumber { - continue - } - } - - // worker (start, end) is within curr (start, end), if hashes are equal then the request is either - // for the same data or some subset of data that is covered by curr - if w.startHash == curr.startHash || w.targetHash == curr.targetHash { - return true - } - } - - return false -} - -// handleTick traverses the pending blocks set to find which forks still need to be requested -func (s *tipSyncer) handleTick() ([]*worker, error) { - logger.Debugf("handling tick, we have %d pending blocks", s.pendingBlocks.size()) - - if s.pendingBlocks.size() == 0 { - return nil, nil - } - - fin, err := s.blockState.GetHighestFinalisedHeader() - if err != nil { - return nil, err - } - - // cases for each 
block in pending set: - // 1. only hash and number are known; in this case, request the full block (and ancestor chain) - // 2. only header is known; in this case, request the block body - // 3. entire block is known; in this case, check if we have become aware of the parent - // if we have, move it to the ready blocks queue; otherwise, request the chain of ancestors - - var workers []*worker - - for _, block := range s.pendingBlocks.getBlocks() { - if block.number <= fin.Number { - // delete from pending set (this should not happen, it should have already been deleted) - s.pendingBlocks.removeBlock(block.hash) - continue - } - - logger.Tracef("handling pending block number %d with hash %s", block.number, block.hash) - - if block.header == nil { - // case 1 - workers = append(workers, &worker{ - startHash: block.hash, - startNumber: uintPtr(block.number), - targetHash: fin.Hash(), - targetNumber: uintPtr(fin.Number), - direction: network.Descending, - requestData: bootstrapRequestData, - pendingBlock: block, - }) - continue - } - - if block.body == nil { - // case 2 - workers = append(workers, &worker{ - startHash: block.hash, - startNumber: uintPtr(block.number), - targetHash: block.hash, - targetNumber: uintPtr(block.number), - requestData: network.RequestedDataBody + network.RequestedDataJustification, - pendingBlock: block, - }) - continue - } - - // case 3 - has, err := s.blockState.HasHeader(block.header.ParentHash) - if err != nil { - return nil, err - } - - if has || s.readyBlocks.has(block.header.ParentHash) { - // block is ready, as parent is known! 
- // also, move any pendingBlocks that are descendants of this block to the ready blocks queue - s.handleReadyBlock(block.toBlockData()) - continue - } - - // request descending chain from (parent of pending block) -> (last finalised block) - workers = append(workers, &worker{ - startHash: block.header.ParentHash, - startNumber: uintPtr(block.number - 1), - targetNumber: uintPtr(fin.Number), - direction: network.Descending, - requestData: bootstrapRequestData, - pendingBlock: block, - }) - } - - return workers, nil -} diff --git a/dot/sync/tip_syncer_integration_test.go b/dot/sync/tip_syncer_integration_test.go deleted file mode 100644 index 5e433d73d9..0000000000 --- a/dot/sync/tip_syncer_integration_test.go +++ /dev/null @@ -1,372 +0,0 @@ -//go:build integration - -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "context" - "testing" - "time" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/ChainSafe/gossamer/lib/trie" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func newTestTipSyncer(t *testing.T) *tipSyncer { - finHeader := types.NewHeader(common.NewHash([]byte{0}), - trie.EmptyHash, trie.EmptyHash, 200, types.NewDigest()) - - ctrl := gomock.NewController(t) - bs := NewMockBlockState(ctrl) - bs.EXPECT().GetHighestFinalisedHeader().Return(finHeader, nil).AnyTimes() - bs.EXPECT().HasHeader(gomock.AssignableToTypeOf(common.Hash{})).Return(true, nil).AnyTimes() - - readyBlocks := newBlockQueue(maxResponseSize) - pendingBlocks := newDisjointBlockSet(pendingBlocksLimit) - return newTipSyncer(bs, pendingBlocks, readyBlocks, nil) -} - -func TestTipSyncer_handleNewPeerState(t *testing.T) { - s := 
newTestTipSyncer(t) - - // peer reports state lower than our highest finalised, we should ignore - ps := &peerState{ - number: 1, - } - - w, err := s.handleNewPeerState(ps) - require.NoError(t, err) - require.Nil(t, w) - - ps = &peerState{ - number: 201, - hash: common.Hash{0xa, 0xb}, - } - - // otherwise, return a worker - expected := &worker{ - startNumber: uintPtr(ps.number), - startHash: ps.hash, - targetNumber: uintPtr(ps.number), - targetHash: ps.hash, - requestData: bootstrapRequestData, - } - - w, err = s.handleNewPeerState(ps) - require.NoError(t, err) - require.Equal(t, expected, w) -} - -func TestTipSyncer_handleWorkerResult(t *testing.T) { - s := newTestTipSyncer(t) - - w, err := s.handleWorkerResult(&worker{}) - require.NoError(t, err) - require.Nil(t, w) - - w, err = s.handleWorkerResult(&worker{ - err: &workerError{ - err: errUnknownParent, - }, - }) - require.NoError(t, err) - require.Nil(t, w) - - // worker is for blocks lower than finalised - w, err = s.handleWorkerResult(&worker{ - targetNumber: uintPtr(199), - }) - require.NoError(t, err) - require.Nil(t, w) - - w, err = s.handleWorkerResult(&worker{ - direction: network.Descending, - startNumber: uintPtr(199), - }) - require.NoError(t, err) - require.Nil(t, w) - - // worker start is lower than finalised, start should be updated - expected := &worker{ - direction: network.Ascending, - startNumber: uintPtr(201), - targetNumber: uintPtr(300), - requestData: bootstrapRequestData, - } - - w, err = s.handleWorkerResult(&worker{ - direction: network.Ascending, - startNumber: uintPtr(199), - targetNumber: uintPtr(300), - requestData: bootstrapRequestData, - err: &workerError{}, - }) - require.NoError(t, err) - require.Equal(t, expected, w) - - expected = &worker{ - direction: network.Descending, - startNumber: uintPtr(300), - targetNumber: uintPtr(201), - requestData: bootstrapRequestData, - } - - w, err = s.handleWorkerResult(&worker{ - direction: network.Descending, - startNumber: uintPtr(300), - 
targetNumber: uintPtr(199), - requestData: bootstrapRequestData, - err: &workerError{}, - }) - require.NoError(t, err) - require.Equal(t, expected, w) - - // start and target are higher than finalised, don't modify - expected = &worker{ - direction: network.Descending, - startNumber: uintPtr(300), - startHash: common.Hash{0xa, 0xb}, - targetNumber: uintPtr(201), - targetHash: common.Hash{0xc, 0xd}, - requestData: bootstrapRequestData, - } - - w, err = s.handleWorkerResult(&worker{ - direction: network.Descending, - startNumber: uintPtr(300), - startHash: common.Hash{0xa, 0xb}, - targetNumber: uintPtr(201), - targetHash: common.Hash{0xc, 0xd}, - requestData: bootstrapRequestData, - err: &workerError{}, - }) - require.NoError(t, err) - require.Equal(t, expected, w) -} - -func TestTipSyncer_handleTick_case1(t *testing.T) { - s := newTestTipSyncer(t) - - w, err := s.handleTick() - require.NoError(t, err) - require.Nil(t, w) - - fin, _ := s.blockState.GetHighestFinalisedHeader() - - // add pending blocks w/ only hash and number, lower than finalised should be removed - s.pendingBlocks.addHashAndNumber(common.Hash{0xa}, fin.Number) - s.pendingBlocks.addHashAndNumber(common.Hash{0xb}, fin.Number+1) - - expected := []*worker{ - { - startHash: common.Hash{0xb}, - startNumber: uintPtr(fin.Number + 1), - targetHash: fin.Hash(), - targetNumber: uintPtr(fin.Number), - direction: network.Descending, - requestData: bootstrapRequestData, - pendingBlock: &pendingBlock{ - hash: common.Hash{0xb}, - number: 201, - clearAt: time.Unix(0, 0), - }, - }, - } - w, err = s.handleTick() - require.NoError(t, err) - require.NotEmpty(t, w) - assert.Greater(t, w[0].pendingBlock.clearAt, time.Now()) - w[0].pendingBlock.clearAt = time.Unix(0, 0) - require.Equal(t, expected, w) - require.False(t, s.pendingBlocks.(*disjointBlockSet).hasBlock(common.Hash{0xa})) - require.True(t, s.pendingBlocks.(*disjointBlockSet).hasBlock(common.Hash{0xb})) -} - -func TestTipSyncer_handleTick_case2(t *testing.T) { - 
s := newTestTipSyncer(t) - - fin, _ := s.blockState.GetHighestFinalisedHeader() - - // add pending blocks w/ only header - header := &types.Header{ - Number: fin.Number + 1, - } - s.pendingBlocks.addHeader(header) - - expected := []*worker{ - { - startHash: header.Hash(), - startNumber: uintPtr(header.Number), - targetHash: header.Hash(), - targetNumber: uintPtr(header.Number), - direction: network.Ascending, - requestData: network.RequestedDataBody + network.RequestedDataJustification, - pendingBlock: &pendingBlock{ - hash: header.Hash(), - number: 201, - header: header, - clearAt: time.Time{}, - }, - }, - } - w, err := s.handleTick() - require.NoError(t, err) - require.NotEmpty(t, w) - assert.Greater(t, w[0].pendingBlock.clearAt, time.Now()) - w[0].pendingBlock.clearAt = time.Time{} - require.Equal(t, expected, w) - require.True(t, s.pendingBlocks.(*disjointBlockSet).hasBlock(header.Hash())) -} -func TestTipSyncer_handleTick_case3(t *testing.T) { - s := newTestTipSyncer(t) - s.handleReadyBlock = func(data *types.BlockData) { - s.pendingBlocks.removeBlock(data.Hash) - s.readyBlocks.push(data) - } - fin, _ := s.blockState.GetHighestFinalisedHeader() - - // add pending block w/ full block, HasHeader will return true, so the block will be processed - header := &types.Header{ - Number: fin.Number + 1, - } - block := &types.Block{ - Header: *header, - Body: types.Body{}, - } - s.pendingBlocks.addBlock(block) - - w, err := s.handleTick() - require.NoError(t, err) - require.Equal(t, []*worker(nil), w) - require.False(t, s.pendingBlocks.(*disjointBlockSet).hasBlock(header.Hash())) - readyBlockData, err := s.readyBlocks.pop(context.Background()) - require.Equal(t, block.ToBlockData(), readyBlockData) - require.NoError(t, err) - - // add pending block w/ full block, but block is not ready as parent is unknown - ctrl := gomock.NewController(t) - bs := NewMockBlockState(ctrl) - bs.EXPECT().GetHighestFinalisedHeader().Return(fin, nil).Times(2) - 
bs.EXPECT().HasHeader(gomock.AssignableToTypeOf(common.Hash{})).Return(false, nil).Times(2) - s.blockState = bs - - header = &types.Header{ - Number: fin.Number + 100, - } - block = &types.Block{ - Header: *header, - Body: types.Body{}, - } - s.pendingBlocks.addBlock(block) - - expected := []*worker{ - { - startHash: header.ParentHash, - startNumber: uintPtr(header.Number - 1), - targetNumber: uintPtr(fin.Number), - direction: network.Descending, - requestData: bootstrapRequestData, - pendingBlock: &pendingBlock{ - hash: header.Hash(), - number: 300, - header: header, - body: &types.Body{}, - clearAt: time.Time{}, - }, - }, - } - - w, err = s.handleTick() - require.NoError(t, err) - require.NotEmpty(t, w) - assert.Greater(t, w[0].pendingBlock.clearAt, time.Now()) - w[0].pendingBlock.clearAt = time.Time{} - require.Equal(t, expected, w) - require.True(t, s.pendingBlocks.(*disjointBlockSet).hasBlock(header.Hash())) - - // add parent block to readyBlocks, should move block to readyBlocks - s.readyBlocks.push(&types.BlockData{ - Hash: header.ParentHash, - }) - w, err = s.handleTick() - require.NoError(t, err) - require.Equal(t, []*worker(nil), w) - require.False(t, s.pendingBlocks.(*disjointBlockSet).hasBlock(header.Hash())) - _, _ = s.readyBlocks.pop(context.Background()) // first pop will remove parent - readyBlockData, err = s.readyBlocks.pop(context.Background()) - require.NoError(t, err) - require.Equal(t, block.ToBlockData(), readyBlockData) -} - -func TestTipSyncer_hasCurrentWorker(t *testing.T) { - s := newTestTipSyncer(t) - require.False(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(0), - targetNumber: uintPtr(0), - }, nil)) - - workers := make(map[uint64]*worker) - workers[0] = &worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(128), - } - require.False(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(129), - }, workers)) - require.True(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(1), - targetNumber: 
uintPtr(128), - }, workers)) - require.True(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(127), - }, workers)) - - workers[0] = &worker{ - startNumber: uintPtr(128), - targetNumber: uintPtr(255), - } - require.False(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(127), - targetNumber: uintPtr(255), - }, workers)) - require.True(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(128), - targetNumber: uintPtr(255), - }, workers)) - - workers[0] = &worker{ - startNumber: uintPtr(128), - targetNumber: uintPtr(1), - direction: network.Descending, - } - require.False(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(129), - targetNumber: uintPtr(1), - direction: network.Descending, - }, workers)) - require.True(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(128), - targetNumber: uintPtr(1), - direction: network.Descending, - }, workers)) - require.True(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(128), - targetNumber: uintPtr(2), - direction: network.Descending, - }, workers)) - require.True(t, s.hasCurrentWorker(&worker{ - startNumber: uintPtr(127), - targetNumber: uintPtr(1), - direction: network.Descending, - }, workers)) -} diff --git a/dot/sync/tip_syncer_test.go b/dot/sync/tip_syncer_test.go deleted file mode 100644 index 09ed2b9eb6..0000000000 --- a/dot/sync/tip_syncer_test.go +++ /dev/null @@ -1,401 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "testing" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/dot/types" - "github.com/ChainSafe/gossamer/lib/common" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" -) - -func Test_tipSyncer_handleNewPeerState(t *testing.T) { - t.Parallel() - - type fields struct { - blockStateBuilder func(ctrl *gomock.Controller) BlockState - pendingBlocks 
DisjointBlockSet - readyBlocks *blockQueue - } - tests := map[string]struct { - fields fields - peerState *peerState - want *worker - err error - }{ - "peer_state_number_<_final_block_number": { - fields: fields{ - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ - Number: 2, - }, nil) - return mockBlockState - }, - }, - peerState: &peerState{number: 1}, - want: nil, - }, - "base_state": { - fields: fields{ - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ - Number: 2, - }, nil) - return mockBlockState - }, - }, - peerState: &peerState{number: 3}, - want: &worker{ - startNumber: uintPtr(3), - targetNumber: uintPtr(3), - requestData: bootstrapRequestData, - }, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - s := &tipSyncer{ - blockState: tt.fields.blockStateBuilder(ctrl), - pendingBlocks: tt.fields.pendingBlocks, - readyBlocks: tt.fields.readyBlocks, - } - got, err := s.handleNewPeerState(tt.peerState) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_tipSyncer_handleTick(t *testing.T) { - t.Parallel() - - type fields struct { - blockStateBuilder func(ctrl *gomock.Controller) BlockState - pendingBlocksBuilder func(ctrl *gomock.Controller) DisjointBlockSet - readyBlocks *blockQueue - } - tests := map[string]struct { - fields fields - want []*worker - err error - }{ - "base_case": { - fields: fields{ - pendingBlocksBuilder: func(ctrl *gomock.Controller) DisjointBlockSet { - mockDisjointBlockSet := NewMockDisjointBlockSet(ctrl) - mockDisjointBlockSet.EXPECT().size().Return(1).Times(2) - 
mockDisjointBlockSet.EXPECT().getBlocks().Return([]*pendingBlock{ - {number: 2}, - {number: 3}, - {number: 4, - header: &types.Header{ - Number: 4, - }, - }, - {number: 5, - header: &types.Header{ - Number: 5, - }, - body: &types.Body{}, - }, - }) - mockDisjointBlockSet.EXPECT().removeBlock(common.Hash{}) - return mockDisjointBlockSet - }, - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ - Number: 2, - }, nil) - mockBlockState.EXPECT().HasHeader(common.Hash{}).Return(false, nil) - return mockBlockState - }, - readyBlocks: newBlockQueue(3), - }, - want: []*worker{ - { - startNumber: uintPtr(3), - targetNumber: uintPtr(2), - targetHash: common.Hash{5, 189, 204, 69, 79, 96, 160, 141, 66, 125, 5, 231, 241, - 159, 36, 15, 220, 57, 31, 87, 10, 183, 111, 203, 150, 236, 202, 11, 88, 35, 211, 191}, - pendingBlock: &pendingBlock{number: 3}, - requestData: bootstrapRequestData, - direction: network.Descending, - }, - { - startNumber: uintPtr(4), - targetNumber: uintPtr(4), - pendingBlock: &pendingBlock{ - number: 4, - header: &types.Header{ - Number: 4, - }, - }, - requestData: network.RequestedDataBody + network.RequestedDataJustification, - }, - { - startNumber: uintPtr(4), - targetNumber: uintPtr(2), - direction: network.Descending, - pendingBlock: &pendingBlock{ - number: 5, - header: &types.Header{ - Number: 5, - }, - body: &types.Body{}, - }, - requestData: bootstrapRequestData, - }, - }, - err: nil, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - s := &tipSyncer{ - blockState: tt.fields.blockStateBuilder(ctrl), - pendingBlocks: tt.fields.pendingBlocksBuilder(ctrl), - readyBlocks: tt.fields.readyBlocks, - } - got, err := s.handleTick() - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - 
assert.Equal(t, tt.want, got) - }) - } -} - -func Test_tipSyncer_handleWorkerResult(t *testing.T) { - t.Parallel() - - tests := map[string]struct { - blockStateBuilder func(ctrl *gomock.Controller) BlockState - res *worker - want *worker - err error - }{ - "worker_error_is_nil": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - return NewMockBlockState(ctrl) - }, - res: &worker{}, - want: nil, - err: nil, - }, - "worker_error_is_error_unknown_parent": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - return NewMockBlockState(ctrl) - }, - res: &worker{ - err: &workerError{ - err: errUnknownParent, - }, - }, - want: nil, - err: nil, - }, - "ascending,_target_number_<_finalised_number": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ - Number: 2, - }, nil) - return mockBlockState - }, - res: &worker{ - targetNumber: uintPtr(1), - direction: network.Ascending, - err: &workerError{}, - }, - want: nil, - err: nil, - }, - "ascending,_start_number_<_finalised_number": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ - Number: 2, - }, nil) - return mockBlockState - }, - res: &worker{ - startNumber: uintPtr(1), - targetNumber: uintPtr(3), - direction: network.Ascending, - err: &workerError{}, - }, - want: &worker{ - startNumber: uintPtr(3), - targetNumber: uintPtr(3), - }, - err: nil, - }, - "descending,_start_number_<_finalised_number": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ - Number: 2, - }, nil) - return mockBlockState - }, - res: &worker{ - startNumber: uintPtr(1), - direction: network.Descending, - err: &workerError{}, - 
}, - want: nil, - err: nil, - }, - "descending,_target_number_<_finalised_number": { - blockStateBuilder: func(ctrl *gomock.Controller) BlockState { - mockBlockState := NewMockBlockState(ctrl) - mockBlockState.EXPECT().GetHighestFinalisedHeader().Return(&types.Header{ - Number: 2, - }, nil) - return mockBlockState - }, - res: &worker{ - startNumber: uintPtr(3), - targetNumber: uintPtr(1), - direction: network.Descending, - err: &workerError{}, - }, - want: &worker{ - startNumber: uintPtr(3), - targetNumber: uintPtr(3), - direction: network.Descending, - }, - err: nil, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - ctrl := gomock.NewController(t) - s := &tipSyncer{ - blockState: tt.blockStateBuilder(ctrl), - } - got, err := s.handleWorkerResult(tt.res) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_tipSyncer_hasCurrentWorker(t *testing.T) { - t.Parallel() - - type args struct { - w *worker - workers map[uint64]*worker - } - tests := map[string]struct { - args args - want bool - }{ - "worker_nil": { - want: true, - }, - "ascending,_false": { - args: args{ - w: &worker{ - direction: network.Ascending, - startNumber: uintPtr(2), - targetNumber: uintPtr(2), - }, - workers: map[uint64]*worker{ - 1: { - direction: network.Ascending, - targetNumber: uintPtr(3), - startNumber: uintPtr(3), - }, - }, - }, - want: false, - }, - "ascending,_true": { - args: args{ - w: &worker{ - direction: network.Ascending, - startNumber: uintPtr(2), - targetNumber: uintPtr(2), - }, - workers: map[uint64]*worker{ - 1: { - direction: network.Ascending, - targetNumber: uintPtr(3), - startNumber: uintPtr(1), - }, - }, - }, - want: true, - }, - "descending,_false": { - args: args{ - w: &worker{ - direction: network.Descending, - startNumber: uintPtr(2), - targetNumber: uintPtr(2), - }, - workers: map[uint64]*worker{ - 1: { - 
startNumber: uintPtr(3), - targetNumber: uintPtr(3), - direction: network.Descending, - }, - }, - }, - want: false, - }, - "descending,_true": { - args: args{ - w: &worker{ - direction: network.Descending, - startNumber: uintPtr(2), - targetNumber: uintPtr(2), - }, - workers: map[uint64]*worker{ - 1: { - startNumber: uintPtr(3), - targetNumber: uintPtr(1), - direction: network.Descending, - }, - }, - }, - want: true, - }, - } - for name, tt := range tests { - tt := tt - t.Run(name, func(t *testing.T) { - t.Parallel() - s := &tipSyncer{} - got := s.hasCurrentWorker(tt.args.w, tt.args.workers) - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/dot/sync/worker.go b/dot/sync/worker.go deleted file mode 100644 index c597623089..0000000000 --- a/dot/sync/worker.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2021 ChainSafe Systems (ON) -// SPDX-License-Identifier: LGPL-3.0-only - -package sync - -import ( - "context" - "sync" - "time" - - "github.com/libp2p/go-libp2p/core/peer" - - "github.com/ChainSafe/gossamer/dot/network" - "github.com/ChainSafe/gossamer/lib/common" -) - -// workerState helps track the current worker set and set the upcoming worker ID -type workerState struct { - ctx context.Context - cancel context.CancelFunc - - sync.Mutex - nextWorker uint64 - workers map[uint64]*worker -} - -func newWorkerState() *workerState { - ctx, cancel := context.WithCancel(context.Background()) - return &workerState{ - ctx: ctx, - cancel: cancel, - workers: make(map[uint64]*worker), - } -} - -func (s *workerState) add(w *worker) { - s.Lock() - defer s.Unlock() - - w.id = s.nextWorker - w.ctx = s.ctx - s.nextWorker++ - s.workers[w.id] = w -} - -func (s *workerState) delete(id uint64) { - s.Lock() - defer s.Unlock() - delete(s.workers, id) -} - -func (s *workerState) reset() { - s.cancel() - s.ctx, s.cancel = context.WithCancel(context.Background()) - - s.Lock() - defer s.Unlock() - - for id := range s.workers { - 
delete(s.workers, id) - } - s.nextWorker = 0 -} - -// worker respresents a process that is attempting to sync from the specified start block to target block -// if it fails for some reason, `err` is set. -// otherwise, we can assume all the blocks have been received and added to the `readyBlocks` queue -type worker struct { - ctx context.Context - id uint64 - retryCount uint16 - peersTried map[peer.ID]struct{} - - startHash common.Hash - startNumber *uint - targetHash common.Hash - targetNumber *uint - - // if this worker is tied to a specific pending block, this field is set - pendingBlock *pendingBlock - - // bitmap of fields to request - requestData byte - direction network.SyncDirection - - duration time.Duration - err *workerError -} - -type workerError struct { - err error - who peer.ID // whose response caused the error, if any -} - -func uintPtr(n uint) *uint { return &n } diff --git a/dot/sync/worker_pool.go b/dot/sync/worker_pool.go new file mode 100644 index 0000000000..3ce7c576dd --- /dev/null +++ b/dot/sync/worker_pool.go @@ -0,0 +1,287 @@ +// Copyright 2023 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package sync + +import ( + "sync" + "time" + + "github.com/ChainSafe/gossamer/dot/network" + "github.com/libp2p/go-libp2p/core/peer" +) + +const ( + available byte = iota + busy + punished +) + +const ( + punishmentBaseTimeout = 5 * time.Minute + maxRequestsAllowed uint = 60 +) + +type syncTask struct { + boundTo *peer.ID + request *network.BlockRequestMessage + resultCh chan<- *syncTaskResult +} + +type syncTaskResult struct { + who peer.ID + request *network.BlockRequestMessage + response *network.BlockResponseMessage + err error +} + +type peerSyncWorker struct { + status byte + timesPunished int + punishmentTime time.Time +} + +type syncWorkerPool struct { + wg sync.WaitGroup + mtx sync.RWMutex + doneCh chan struct{} + availableCond *sync.Cond + + network Network + requestMaker 
network.RequestMaker
+	taskQueue    chan *syncTask
+	workers      map[peer.ID]*peerSyncWorker
+	ignorePeers  map[peer.ID]struct{}
+}
+
+func newSyncWorkerPool(net Network, requestMaker network.RequestMaker) *syncWorkerPool {
+	swp := &syncWorkerPool{
+		network:      net,
+		requestMaker: requestMaker,
+		doneCh:       make(chan struct{}),
+		workers:      make(map[peer.ID]*peerSyncWorker),
+		taskQueue:    make(chan *syncTask, maxRequestsAllowed+1),
+		ignorePeers:  make(map[peer.ID]struct{}),
+	}
+
+	swp.availableCond = sync.NewCond(&swp.mtx)
+	return swp
+}
+
+// useConnectedPeers will retrieve all connected peers
+// through the network layer and use them as sources of blocks
+func (s *syncWorkerPool) useConnectedPeers() {
+	connectedPeers := s.network.AllConnectedPeersID()
+	if len(connectedPeers) < 1 {
+		return
+	}
+
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+	for _, connectedPeer := range connectedPeers {
+		s.newPeer(connectedPeer)
+	}
+}
+
+func (s *syncWorkerPool) fromBlockAnnounce(who peer.ID) {
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+	s.newPeer(who)
+}
+
+// newPeer includes a new peer in the worker
+// pool as long as it is not an ignored peer
+func (s *syncWorkerPool) newPeer(who peer.ID) {
+	if _, ok := s.ignorePeers[who]; ok {
+		return
+	}
+
+	peerSync, has := s.workers[who]
+	if !has {
+		peerSync = &peerSyncWorker{status: available}
+		s.workers[who] = peerSync
+
+		logger.Tracef("potential worker added, total in the pool %d", len(s.workers))
+	}
+
+	// lift the punishment if it has already expired
+	if peerSync.status == punished && peerSync.punishmentTime.Before(time.Now()) {
+		peerSync.status = available
+		s.workers[who] = peerSync
+	}
+}
+
+// submitBoundedRequest given a request the worker pool will drive it
+// to the given peer.ID, used for tip sync when we receive a block announce
+// from a peer and we want to use the exact same peer to request blocks
+func (s *syncWorkerPool) submitBoundedRequest(request *network.BlockRequestMessage,
+	who peer.ID, resultCh chan<-
*syncTaskResult) {
+	s.taskQueue <- &syncTask{
+		boundTo:  &who,
+		request:  request,
+		resultCh: resultCh,
+	}
+}
+
+// submitRequest given a request the worker pool will get the very first available worker
+// to perform the request, the response will be dispatched in the resultCh
+func (s *syncWorkerPool) submitRequest(request *network.BlockRequestMessage, resultCh chan<- *syncTaskResult) {
+	s.taskQueue <- &syncTask{
+		request:  request,
+		resultCh: resultCh,
+	}
+}
+
+// submitRequests takes a set of requests and will submit to the pool through submitRequest
+// the responses will be dispatched in the resultCh
+func (s *syncWorkerPool) submitRequests(requests []*network.BlockRequestMessage, resultCh chan<- *syncTaskResult) {
+	for _, request := range requests {
+		s.submitRequest(request, resultCh)
+	}
+}
+
+// punishPeer given a peer.ID we increase the number of times it was punished
+// and apply the punishment time using the base timeout of 5m, so
+// each time a peer is punished its timeout will increase by 5m
+func (s *syncWorkerPool) punishPeer(who peer.ID) {
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+
+	worker, has := s.workers[who]
+	if !has {
+		return
+	}
+
+	timesPunished := worker.timesPunished + 1
+	punishmentTime := time.Duration(timesPunished) * punishmentBaseTimeout
+	logger.Debugf("⏱️ punishment time for peer %s: %.2fs", who, punishmentTime.Seconds())
+
+	s.workers[who] = &peerSyncWorker{
+		status:         punished,
+		timesPunished:  timesPunished,
+		punishmentTime: time.Now().Add(punishmentTime),
+	}
+}
+
+func (s *syncWorkerPool) ignorePeerAsWorker(who peer.ID) {
+	s.mtx.Lock()
+	defer s.mtx.Unlock()
+
+	delete(s.workers, who)
+	s.ignorePeers[who] = struct{}{}
+}
+
+// totalWorkers returns the number of available workers only
+func (s *syncWorkerPool) totalWorkers() (total uint) {
+	s.mtx.RLock()
+	defer s.mtx.RUnlock()
+
+	for _, worker := range s.workers {
+		if worker.status == available {
+			total += 1
+		}
+	}
+
+	return total
+}
+
+// getAvailablePeer returns the very first
peer available, if there
+// is no peer available then the caller should wait on availableCond
+func (s *syncWorkerPool) getAvailablePeer() peer.ID {
+	for peerID, peerSync := range s.workers {
+		switch peerSync.status {
+		case punished:
+			// if the punishmentTime has passed then we can
+			// use it as an available peer
+			if peerSync.punishmentTime.Before(time.Now()) {
+				return peerID
+			}
+		case available:
+			return peerID
+		default:
+		}
+	}
+
+	return peer.ID("")
+}
+
+func (s *syncWorkerPool) getPeerByID(peerID peer.ID) *peerSyncWorker {
+	peerSync, has := s.workers[peerID]
+	if !has {
+		return nil
+	}
+
+	return peerSync
+}
+
+func (s *syncWorkerPool) listenForRequests(stopCh chan struct{}) {
+	defer close(s.doneCh)
+	for {
+		select {
+		case <-stopCh:
+			// wait for ongoing requests to be finished before returning
+			s.wg.Wait()
+			return
+
+		case task := <-s.taskQueue:
+			// whenever a task arrives we try to find an available peer
+			// if the task is directed at some peer then we will wait for
+			// that peer to become available, the same happens when a normal task
+			// arrives and there is no available peer, then we should wait
+			// for someone to become free and then use it.
+ + s.mtx.Lock() + for { + var peerID peer.ID + if task.boundTo != nil { + peerSync := s.getPeerByID(*task.boundTo) + if peerSync != nil && peerSync.status == available { + peerID = *task.boundTo + } + } else { + peerID = s.getAvailablePeer() + } + + if peerID != peer.ID("") { + peerSync := s.workers[peerID] + peerSync.status = busy + s.workers[peerID] = peerSync + + s.mtx.Unlock() + + s.wg.Add(1) + go s.executeRequest(peerID, task) + break + } + + s.availableCond.Wait() + } + } + } +} + +func (s *syncWorkerPool) executeRequest(who peer.ID, task *syncTask) { + defer s.wg.Done() + request := task.request + + logger.Debugf("[EXECUTING] worker %s, block request: %s", who, request) + response := new(network.BlockResponseMessage) + err := s.requestMaker.Do(who, request, response) + logger.Debugf("[FINISHED] worker %s, err: %s, block data amount: %d", who, err, len(response.BlockData)) + + s.mtx.Lock() + peerSync, has := s.workers[who] + if has { + peerSync.status = available + s.workers[who] = peerSync + } + s.mtx.Unlock() + s.availableCond.Signal() + + task.resultCh <- &syncTaskResult{ + who: who, + request: request, + response: response, + err: err, + } +} diff --git a/dot/sync/worker_pool_test.go b/dot/sync/worker_pool_test.go new file mode 100644 index 0000000000..98028aa6c1 --- /dev/null +++ b/dot/sync/worker_pool_test.go @@ -0,0 +1,376 @@ +// Copyright 2023 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package sync + +import ( + "testing" + "time" + + "github.com/ChainSafe/gossamer/dot/network" + "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/lib/common" + "github.com/ChainSafe/gossamer/lib/common/variadic" + "github.com/golang/mock/gomock" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" +) + +func TestSyncWorkerPool_useConnectedPeers(t *testing.T) { + 
t.Parallel() + stablePunishmentTime := time.Now().Add(time.Minute * 2) + + cases := map[string]struct { + setupWorkerPool func(t *testing.T) *syncWorkerPool + expectedPool map[peer.ID]*peerSyncWorker + }{ + "no_connected_peers": { + setupWorkerPool: func(t *testing.T) *syncWorkerPool { + ctrl := gomock.NewController(t) + networkMock := NewMockNetwork(ctrl) + networkMock.EXPECT(). + AllConnectedPeersID(). + Return([]peer.ID{}) + + return newSyncWorkerPool(networkMock, nil) + }, + expectedPool: make(map[peer.ID]*peerSyncWorker), + }, + "3_available_peers": { + setupWorkerPool: func(t *testing.T) *syncWorkerPool { + ctrl := gomock.NewController(t) + networkMock := NewMockNetwork(ctrl) + networkMock.EXPECT(). + AllConnectedPeersID(). + Return([]peer.ID{ + peer.ID("available-1"), + peer.ID("available-2"), + peer.ID("available-3"), + }) + return newSyncWorkerPool(networkMock, nil) + }, + expectedPool: map[peer.ID]*peerSyncWorker{ + peer.ID("available-1"): {status: available}, + peer.ID("available-2"): {status: available}, + peer.ID("available-3"): {status: available}, + }, + }, + "2_available_peers_1_to_ignore": { + setupWorkerPool: func(t *testing.T) *syncWorkerPool { + ctrl := gomock.NewController(t) + networkMock := NewMockNetwork(ctrl) + networkMock.EXPECT(). + AllConnectedPeersID(). + Return([]peer.ID{ + peer.ID("available-1"), + peer.ID("available-2"), + peer.ID("available-3"), + }) + workerPool := newSyncWorkerPool(networkMock, nil) + workerPool.ignorePeers[peer.ID("available-3")] = struct{}{} + return workerPool + }, + expectedPool: map[peer.ID]*peerSyncWorker{ + peer.ID("available-1"): {status: available}, + peer.ID("available-2"): {status: available}, + }, + }, + "peer_punishment_not_valid_anymore": { + setupWorkerPool: func(t *testing.T) *syncWorkerPool { + ctrl := gomock.NewController(t) + networkMock := NewMockNetwork(ctrl) + networkMock.EXPECT(). + AllConnectedPeersID(). 
+ Return([]peer.ID{ + peer.ID("available-1"), + peer.ID("available-2"), + peer.ID("available-3"), + }) + workerPool := newSyncWorkerPool(networkMock, nil) + workerPool.workers[peer.ID("available-3")] = &peerSyncWorker{ + status: punished, + //arbitrary unix value + punishmentTime: time.Unix(1000, 0), + } + return workerPool + }, + expectedPool: map[peer.ID]*peerSyncWorker{ + peer.ID("available-1"): {status: available}, + peer.ID("available-2"): {status: available}, + peer.ID("available-3"): { + status: available, + punishmentTime: time.Unix(1000, 0), + }, + }, + }, + "peer_punishment_still_valid": { + setupWorkerPool: func(t *testing.T) *syncWorkerPool { + ctrl := gomock.NewController(t) + networkMock := NewMockNetwork(ctrl) + networkMock.EXPECT(). + AllConnectedPeersID(). + Return([]peer.ID{ + peer.ID("available-1"), + peer.ID("available-2"), + peer.ID("available-3"), + }) + workerPool := newSyncWorkerPool(networkMock, nil) + workerPool.workers[peer.ID("available-3")] = &peerSyncWorker{ + status: punished, + punishmentTime: stablePunishmentTime, + } + return workerPool + }, + expectedPool: map[peer.ID]*peerSyncWorker{ + peer.ID("available-1"): {status: available}, + peer.ID("available-2"): {status: available}, + peer.ID("available-3"): { + status: punished, + punishmentTime: stablePunishmentTime, + }, + }, + }, + } + + for tname, tt := range cases { + tt := tt + t.Run(tname, func(t *testing.T) { + t.Parallel() + + workerPool := tt.setupWorkerPool(t) + workerPool.useConnectedPeers() + + require.Equal(t, workerPool.workers, tt.expectedPool) + }) + } +} + +func TestSyncWorkerPool_newPeer(t *testing.T) { + t.Parallel() + stablePunishmentTime := time.Now().Add(time.Minute * 2) + + cases := map[string]struct { + peerID peer.ID + setupWorkerPool func(t *testing.T) *syncWorkerPool + expectedPool map[peer.ID]*peerSyncWorker + }{ + "very_fist_entry": { + peerID: peer.ID("peer-1"), + setupWorkerPool: func(*testing.T) *syncWorkerPool { + return newSyncWorkerPool(nil, nil) + 
}, + expectedPool: map[peer.ID]*peerSyncWorker{ + peer.ID("peer-1"): {status: available}, + }, + }, + "peer_to_ignore": { + peerID: peer.ID("to-ignore"), + setupWorkerPool: func(*testing.T) *syncWorkerPool { + workerPool := newSyncWorkerPool(nil, nil) + workerPool.ignorePeers[peer.ID("to-ignore")] = struct{}{} + return workerPool + }, + expectedPool: map[peer.ID]*peerSyncWorker{}, + }, + "peer_punishment_not_valid_anymore": { + peerID: peer.ID("free-again"), + setupWorkerPool: func(*testing.T) *syncWorkerPool { + workerPool := newSyncWorkerPool(nil, nil) + workerPool.workers[peer.ID("free-again")] = &peerSyncWorker{ + status: punished, + //arbitrary unix value + punishmentTime: time.Unix(1000, 0), + } + return workerPool + }, + expectedPool: map[peer.ID]*peerSyncWorker{ + peer.ID("free-again"): { + status: available, + punishmentTime: time.Unix(1000, 0), + }, + }, + }, + "peer_punishment_still_valid": { + peerID: peer.ID("peer_punished"), + setupWorkerPool: func(*testing.T) *syncWorkerPool { + + workerPool := newSyncWorkerPool(nil, nil) + workerPool.workers[peer.ID("peer_punished")] = &peerSyncWorker{ + status: punished, + punishmentTime: stablePunishmentTime, + } + return workerPool + }, + expectedPool: map[peer.ID]*peerSyncWorker{ + peer.ID("peer_punished"): { + status: punished, + punishmentTime: stablePunishmentTime, + }, + }, + }, + } + + for tname, tt := range cases { + tt := tt + t.Run(tname, func(t *testing.T) { + t.Parallel() + + workerPool := tt.setupWorkerPool(t) + workerPool.newPeer(tt.peerID) + + require.Equal(t, workerPool.workers, tt.expectedPool) + }) + } +} + +func TestSyncWorkerPool_listenForRequests_submitRequest(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + networkMock := NewMockNetwork(ctrl) + requestMakerMock := NewMockRequestMaker(ctrl) + workerPool := newSyncWorkerPool(networkMock, requestMakerMock) + + stopCh := make(chan struct{}) + defer close(stopCh) + go workerPool.listenForRequests(stopCh) + + availablePeer := 
peer.ID("available-peer") + workerPool.newPeer(availablePeer) + + blockHash := common.MustHexToHash("0x750646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707") + blockRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(blockHash), + 1, network.BootstrapRequestData, network.Descending) + mockedBlockResponse := &network.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Hash: blockHash, + Header: &types.Header{ + ParentHash: common. + MustHexToHash("0x5895897f12e1a670609929433ac7a69dcae90e0cc2d9c32c0dce0e2a5e5e614e"), + }, + }, + }, + } + + // introduce a timeout of 5s then we can test the + // peer status change to busy + requestMakerMock.EXPECT(). + Do(availablePeer, blockRequest, &network.BlockResponseMessage{}). + DoAndReturn(func(_, _, response any) any { + time.Sleep(5 * time.Second) + responsePtr := response.(*network.BlockResponseMessage) + *responsePtr = *mockedBlockResponse + return nil + }) + + resultCh := make(chan *syncTaskResult) + workerPool.submitRequest(blockRequest, resultCh) + + // ensure the task is in the pool and was already + // assigned to the peer + time.Sleep(time.Second) + + totalWorkers := workerPool.totalWorkers() + require.Zero(t, totalWorkers) + + peerSync := workerPool.getPeerByID(availablePeer) + require.Equal(t, peerSync.status, busy) + + syncTaskResult := <-resultCh + require.NoError(t, syncTaskResult.err) + require.Equal(t, syncTaskResult.who, availablePeer) + require.Equal(t, syncTaskResult.request, blockRequest) + require.Equal(t, syncTaskResult.response, mockedBlockResponse) +} + +func TestSyncWorkerPool_listenForRequests_busyWorkers(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + networkMock := NewMockNetwork(ctrl) + requestMakerMock := NewMockRequestMaker(ctrl) + workerPool := newSyncWorkerPool(networkMock, requestMakerMock) + + stopCh := make(chan struct{}) + defer close(stopCh) + go workerPool.listenForRequests(stopCh) + + availablePeer := peer.ID("available-peer") + 
workerPool.newPeer(availablePeer) + + firstRequestBlockHash := common.MustHexToHash("0x750646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707") + firstBlockRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(firstRequestBlockHash), + 1, network.BootstrapRequestData, network.Descending) + + secondRequestBlockHash := common.MustHexToHash("0x897646b852a29e5f3668959916a03d6243a3137e91d0cd36870364931030f707") + secondBlockRequest := network.NewBlockRequest(*variadic.MustNewUint32OrHash(firstRequestBlockHash), + 1, network.BootstrapRequestData, network.Descending) + + firstMockedBlockResponse := &network.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Hash: firstRequestBlockHash, + Header: &types.Header{ + ParentHash: common. + MustHexToHash("0x5895897f12e1a670609929433ac7a69dcae90e0cc2d9c32c0dce0e2a5e5e614e"), + }, + }, + }, + } + + secondMockedBlockResponse := &network.BlockResponseMessage{ + BlockData: []*types.BlockData{ + { + Hash: secondRequestBlockHash, + Header: &types.Header{ + ParentHash: common. + MustHexToHash("0x8965897f12e1a670609929433ac7a69dcae90e0cc2d9c32c0dce0e2a5e5e614e"), + }, + }, + }, + } + + // introduce a timeout of 5s then we can test the + // then we can simulate a busy peer + requestMakerMock.EXPECT(). + Do(availablePeer, firstBlockRequest, &network.BlockResponseMessage{}). + DoAndReturn(func(_, _, response any) any { + time.Sleep(5 * time.Second) + responsePtr := response.(*network.BlockResponseMessage) + *responsePtr = *firstMockedBlockResponse + return nil + }) + + requestMakerMock.EXPECT(). + Do(availablePeer, firstBlockRequest, &network.BlockResponseMessage{}). 
+ DoAndReturn(func(_, _, response any) any { + responsePtr := response.(*network.BlockResponseMessage) + *responsePtr = *secondMockedBlockResponse + return nil + }) + + resultCh := make(chan *syncTaskResult) + + workerPool.submitRequests( + []*network.BlockRequestMessage{firstBlockRequest, secondBlockRequest}, resultCh) + + // ensure the task is in the pool and was already + // assigned to the peer + time.Sleep(time.Second) + require.Zero(t, workerPool.totalWorkers()) + + syncTaskResult := <-resultCh + require.NoError(t, syncTaskResult.err) + require.Equal(t, syncTaskResult.who, availablePeer) + require.Equal(t, syncTaskResult.request, firstBlockRequest) + require.Equal(t, syncTaskResult.response, firstMockedBlockResponse) + + syncTaskResult = <-resultCh + require.NoError(t, syncTaskResult.err) + require.Equal(t, syncTaskResult.who, availablePeer) + require.Equal(t, syncTaskResult.request, secondBlockRequest) + require.Equal(t, syncTaskResult.response, secondMockedBlockResponse) + + require.Equal(t, uint(1), workerPool.totalWorkers()) +} diff --git a/go.mod b/go.mod index e67004c449..0df6a994cd 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,9 @@ require ( github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce github.com/centrifuge/go-substrate-rpc-client/v4 v4.1.0 github.com/chyeh/pubip v0.0.0-20170203095919-b7e679cf541c + github.com/cockroachdb/pebble v0.0.0-20230721221451-fcaeb47a50e0 github.com/cosmos/go-bip39 v1.0.0 + github.com/dgraph-io/badger v1.6.2 github.com/dgraph-io/badger/v2 v2.2007.4 github.com/dgraph-io/badger/v4 v4.1.0 github.com/dgraph-io/ristretto v0.1.1 @@ -47,13 +49,19 @@ require ( ) require ( + github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect github.com/ChainSafe/log15 v1.0.0 // indirect + 
github.com/DataDog/zstd v1.5.2 // indirect github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cockroachdb/errors v1.9.1 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/redact v1.1.3 // indirect + github.com/cockroachdb/tokenbucket v0.0.0-20230613231145-182959a1fad6 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -70,6 +78,7 @@ require ( github.com/francoispqt/gojay v1.2.13 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect + github.com/getsentry/sentry-go v0.18.0 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.1 // indirect @@ -108,6 +117,8 @@ require ( github.com/jpillora/backoff v1.0.0 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/koron/go-ssdp v0.0.4 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect github.com/leodido/go-urn v1.2.4 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect 
github.com/libp2p/go-cidranger v1.1.0 // indirect @@ -159,6 +170,7 @@ require ( github.com/quic-go/quic-go v0.33.0 // indirect github.com/quic-go/webtransport-go v0.5.3 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect + github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/rs/cors v1.8.2 // indirect github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/smartystreets/assertions v1.13.0 // indirect diff --git a/go.sum b/go.sum index 40099d6fcc..c35dea51af 100644 --- a/go.sum +++ b/go.sum @@ -43,7 +43,9 @@ dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBr dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ChainSafe/chaindb v0.1.5 h1:aoji9HJxZlaG0J+OZUbjbf3Jrf1iij7qAIfjTwFIWWE= @@ -54,15 +56,23 @@ github.com/ChainSafe/log15 v1.0.0 
h1:vRDVtWtVwIH5uSCBvgTTZh6FA58UBJ6+QiiypaZfBf8 github.com/ChainSafe/log15 v1.0.0/go.mod h1:5v1+ALHtdW0NfAeeoYyKmzCAMcAeqkdhIg4uxXWIgOg= github.com/ChainSafe/wazero v0.0.0-20230710171859-39a4c235ec1f h1:/sI8TMJ77HL2UImQs7pY7khVN96EXQJGVOrX88dTpcY= github.com/ChainSafe/wazero v0.0.0-20230710171859-39a4c235ec1f/go.mod h1:wYx2gNRg8/WihJfSDxA1TIL8H+GkfLYm+bIfbblu9VQ= +github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= +github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= +github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= 
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= @@ -116,6 +126,20 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= +github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= +github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= +github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= 
+github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/pebble v0.0.0-20230721221451-fcaeb47a50e0 h1:6OwRzk7AKRNumJttanSCJPVMkmXvfioVgijKfJHAceU= +github.com/cockroachdb/pebble v0.0.0-20230721221451-fcaeb47a50e0/go.mod h1:FN5O47SBEz5+kO9fG8UTR64g2WS1u5ZFCgTvxGjoSks= +github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= +github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/tokenbucket v0.0.0-20230613231145-182959a1fad6 h1:DJK8W/iB+s/qkTtmXSrHA49lp5O3OsR7E6z4byOLy34= +github.com/cockroachdb/tokenbucket v0.0.0-20230613231145-182959a1fad6/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= @@ -133,6 +157,7 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -151,12 +176,15 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeC github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= +github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= github.com/dgraph-io/badger/v2 v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= github.com/dgraph-io/badger/v4 v4.1.0 h1:E38jc0f+RATYrycSUf9LMv/t47XAy+3CApyYSq4APOQ= github.com/dgraph-io/badger/v4 v4.1.0/go.mod h1:P50u28d39ibBRmIJuQC/NSdBOg46HnHw7al2SW5QRHg= +github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.1.1 
h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= @@ -171,6 +199,7 @@ github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= @@ -179,11 +208,15 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/ethereum/go-ethereum v1.12.0 
h1:bdnhLPtqETd4m3mS8BGMNvBTf36bO5bx/hxE2zljOa0= github.com/ethereum/go-ethereum v1.12.0/go.mod h1:/oo2X/dZLJjf2mJ6YT9wcWxa4nNJDBKDBU6sFIpx1Gs= +github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= @@ -196,13 +229,21 @@ github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= +github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= +github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= +github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse 
v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -211,6 +252,7 @@ github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= 
+github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= @@ -236,14 +278,20 @@ github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 
h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= @@ -277,6 +325,7 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -284,6 +333,7 @@ github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v23.1.21+incompatible h1:bUqzx/MXCDxuS0hRJL2EfjyZL3uQrPbMocUa8zGqsTA= @@ -354,6 +404,7 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= @@ -369,8 +420,10 @@ github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7 github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod 
h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -397,6 +450,11 @@ github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipld/go-ipld-prime v0.20.0 h1:Ud3VwE9ClxpO2LkCYP7vWPc0Fo+dYdYzgxUJZ3uRG4g= github.com/ipld/go-ipld-prime v0.20.0/go.mod h1:PzqZ/ZR981eKbgdr3y2DJYeD/8bgMawdGVlJDE8kK+M= +github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= +github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= +github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk= +github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g= +github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod 
h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= @@ -421,14 +479,23 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8= +github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE= +github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE= +github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro= +github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= 
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= @@ -438,11 +505,16 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= @@ -485,18 +557,25 @@ github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= 
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI= github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= @@ -523,6 +602,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 
h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= @@ -552,11 +632,16 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/nanobox-io/golang-scribble v0.0.0-20190309225732-aa3e7c118975 h1:zm/Rb2OsnLWCY88Njoqgo4X6yt/lx3oBNWhepX0AOMU= github.com/nanobox-io/golang-scribble v0.0.0-20190309225732-aa3e7c118975/go.mod h1:4Mct/lWCFf1jzQTTAaWtOI7sXqmG+wBeiBfT4CxoaJk= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo/v2 v2.9.7 h1:06xGQy5www2oN160RtEZoTvnP2sPhEfePYmCDc2szss= @@ -580,6 +665,9 @@ github.com/phuslu/iploc v1.0.20230201 h1:AMhy7j8z0N5iI0jaqh514KTDEB7wVdQJ4Y4DJPC github.com/phuslu/iploc v1.0.20230201/go.mod h1:gsgExGWldwv1AEzZm+Ki9/vGfyjkL33pbSr9HGpt2Xg= github.com/pierrec/xxHash v0.1.5 h1:n/jBpwTHiER4xYvK3/CdPVnLDPchj8eTJFFLUb4QHBo= github.com/pierrec/xxHash v0.1.5/go.mod h1:w2waW5Zoa/Wc4Yqe0wgrIYAGKqRMf7czn2HNKXmuL+I= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -616,12 +704,17 @@ github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2Gk github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod 
h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= @@ -649,9 +742,11 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= github.com/sirupsen/logrus v1.7.0/go.mod 
h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/assertions v1.13.0 h1:Dx1kYM01xsSqKPno3aqLnrwac2LetPvN23diwyr69Qs= github.com/smartystreets/assertions v1.13.0/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= @@ -705,14 +800,21 @@ github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefld github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce h1:fb190+cK2Xz/dvi9Hv8eCYJYvIGUTN2/KLq1pT6CjEc= github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.7 
h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vedhavyas/go-subkey v1.0.4 h1:QwjBZx4w7qXC2lmqol2jJfhaNXPI9BsgLZiMiCwqGDU= github.com/vedhavyas/go-subkey v1.0.4/go.mod h1:aOIil/KS9hJlnr9ZSQKSoXdu/MbnkCxG4x9IOlLsMtI= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= @@ -725,7 +827,14 @@ github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdz github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9 h1:Y1/FEOpaCpD21WxrmfeIYCFPuVPRCY2XZTWzTNHGw30= 
github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -775,14 +884,17 @@ golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= @@ -812,6 +924,7 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile 
v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -832,10 +945,12 @@ golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -843,6 +958,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -867,6 +983,7 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= @@ -913,6 +1030,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -948,12 +1066,16 @@ golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -970,6 +1092,7 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= @@ -978,15 +1101,18 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -1034,6 +1160,7 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= @@ -1074,6 +1201,7 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -1114,6 +1242,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1133,6 +1263,7 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1154,9 +1285,13 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -1166,6 +1301,7 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 
v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/internal/database/database.go b/internal/database/database.go new file mode 100644 index 0000000000..5893fe9280 --- /dev/null +++ b/internal/database/database.go @@ -0,0 +1,58 @@ +package database + +import ( + "io" +) + +type Reader interface { + Get(key []byte) ([]byte, error) + Has(key []byte) (bool, error) +} + +type Writer interface { + Put(key, value []byte) error + Del(key []byte) error + Flush() error +} + +// Iterator iterates over key/value pairs in ascending key order. +// Must be released after use. +type Iterator interface { + Valid() bool + Next() bool + Key() []byte + Value() []byte + First() bool + Release() + SeekGE(key []byte) bool +} + +// Batch is a write-only operation. +type Batch interface { + io.Closer + Writer + + ValueSize() int + Reset() +} + +// Database wraps all database operations. All methods are safe for concurrent use. 
+type Database interface { + Reader + Writer + io.Closer + + Path() string + NewBatch() Batch + NewIterator() Iterator + NewPrefixIterator(prefix []byte) Iterator +} + +type Table interface { + Reader + Writer + io.Closer + Path() string + NewBatch() Batch + NewIterator() Iterator +} diff --git a/internal/database/pebble.go b/internal/database/pebble.go new file mode 100644 index 0000000000..fb39dd29a5 --- /dev/null +++ b/internal/database/pebble.go @@ -0,0 +1,141 @@ +package database + +import ( + "errors" + "fmt" + "os" + + "github.com/ChainSafe/gossamer/internal/log" + "github.com/cockroachdb/pebble" + "github.com/cockroachdb/pebble/vfs" +) + +var logger = log.NewFromGlobal(log.AddContext("internal", "database")) + +var _ Database = (*pebbleDB)(nil) + +type pebbleDB struct { + path string + db *pebble.DB +} + +func NewPebble(path string, inMemory bool) (*pebbleDB, error) { + opts := &pebble.Options{} + if inMemory { + opts = &pebble.Options{FS: vfs.NewMem()} + } else { + if err := os.MkdirAll(path, os.ModePerm); err != nil { + return nil, err + } + } + + db, err := pebble.Open(path, opts) + if err != nil { + return nil, fmt.Errorf("opening pebble db: %w", err) + } + + return &pebbleDB{path, db}, nil +} + +func (p *pebbleDB) Path() string { + return p.path +} + +func (p *pebbleDB) Put(key, value []byte) error { + err := p.db.Set(key, value, &pebble.WriteOptions{}) + if err != nil { + return fmt.Errorf("writing 0x%x with value 0x%x to database: %w", + key, value, err) + } + return nil +} + +func (p *pebbleDB) Get(key []byte) (value []byte, err error) { + value, closer, err := p.db.Get(key) + if err != nil { + return nil, fmt.Errorf("getting 0x%x from database: %w", key, err) + } + + if err := closer.Close(); err != nil { + return nil, fmt.Errorf("closing after get: %w", err) + } + + valueCpy := make([]byte, len(value)) + copy(valueCpy[:], value[:]) + return valueCpy, err +} + +func (p *pebbleDB) Has(key []byte) 
(exists bool, err error) { + value, closer, err := p.db.Get(key) + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + return false, nil + } + + return false, err + } + + if err := closer.Close(); err != nil { + return false, fmt.Errorf("closing after get: %w", err) + } + + return true, nil +} + +func (p *pebbleDB) Del(key []byte) error { + err := p.db.Delete(key, &pebble.WriteOptions{}) + if err != nil { + return fmt.Errorf("deleting 0x%x from database: %w", key, err) + } + + return nil +} + +func (p *pebbleDB) Close() error { + return p.db.Close() +} + +func (p *pebbleDB) Flush() error { + err := p.db.Flush() + if err != nil { + return fmt.Errorf("flushing database: %w", err) + } + + return nil +} + +func (p *pebbleDB) NewBatch() Batch { + return &pebbleBatch{ + batch: p.db.NewBatch(), + } +} + +func (p *pebbleDB) NewIterator() Iterator { + return &pebbleIterator{ + p.db.NewIter(nil), + } +} + +func (p *pebbleDB) NewPrefixIterator(prefix []byte) Iterator { + keyUpperBound := func(b []byte) []byte { + end := make([]byte, len(b)) + copy(end, b) + + for i := len(end) - 1; i >= 0; i-- { + end[i] = end[i] + 1 + if end[i] != 0 { + return end[:i+1] + } + } + + return nil + } + + prefixIterOptions := &pebble.IterOptions{ + LowerBound: prefix, + UpperBound: keyUpperBound(prefix), + } + + return &pebbleIterator{ + p.db.NewIter(prefixIterOptions), + } +} diff --git a/internal/database/pebble_batch.go b/internal/database/pebble_batch.go new file mode 100644 index 0000000000..ed961bf9df --- /dev/null +++ b/internal/database/pebble_batch.go @@ -0,0 +1,49 @@ +package database + +import ( + "fmt" + + "github.com/cockroachdb/pebble" +) + +var _ Batch = (*pebbleBatch)(nil) + +type pebbleBatch struct { + batch *pebble.Batch +} + +func (pb *pebbleBatch) Put(key, value []byte) error { + err := pb.batch.Set(key, value, &pebble.WriteOptions{}) + if err != nil { + return fmt.Errorf("setting to batch writer: %w", err) + } + return nil +} +func (pb 
*pebbleBatch) Del(key []byte) error { + err := pb.batch.Delete(key, &pebble.WriteOptions{}) + if err != nil { + return fmt.Errorf("deleting from batch: %w", err) + } + return nil +} + +func (pb *pebbleBatch) Flush() error { + err := pb.batch.Commit(&pebble.WriteOptions{}) + if err != nil { + return fmt.Errorf("committing batch: %w", err) + } + + return nil +} + +func (pb *pebbleBatch) ValueSize() int { + return int(pb.batch.Count()) +} + +func (pb *pebbleBatch) Reset() { + pb.batch.Reset() +} + +func (pb *pebbleBatch) Close() error { + return pb.batch.Close() +} diff --git a/internal/database/pebble_iterator.go b/internal/database/pebble_iterator.go new file mode 100644 index 0000000000..954f1f0df2 --- /dev/null +++ b/internal/database/pebble_iterator.go @@ -0,0 +1,16 @@ +package database + +import "github.com/cockroachdb/pebble" + +var _ Iterator = (*pebbleIterator)(nil) + +type pebbleIterator struct { + *pebble.Iterator +} + +func (pi *pebbleIterator) Release() { + err := pi.Close() + if err != nil { + logger.Criticalf("while closing iterator: %s", err) + } +} diff --git a/internal/database/pebble_test.go b/internal/database/pebble_test.go new file mode 100644 index 0000000000..e824cc99a7 --- /dev/null +++ b/internal/database/pebble_test.go @@ -0,0 +1,211 @@ +package database + +import ( + "fmt" + "os" + "testing" + + "github.com/cockroachdb/pebble" + "github.com/stretchr/testify/require" +) + +type testAssertion struct { + input string + expected string +} + +func testSetup() []testAssertion { + tests := []testAssertion{ + {"camel", "camel"}, + {"walrus", "walrus"}, + {"296204", "296204"}, + {"\x00123\x00", "\x00123\x00"}, + } + return tests +} + +func testNewPebble(t *testing.T) Database { + t.Helper() + + db, err := NewPebble(t.TempDir(), false) + if err != nil { + t.Fatal(err) + } + + t.Cleanup(func() { + err := db.Close() + require.NoError(t, err) + }) + + return db +} + +func 
TestPebbleDatabaseImplementations(t *testing.T) { + db := testNewPebble(t) + + testPutGetter(t, db) + testHasGetter(t, db) + testUpdateGetter(t, db) + testDelGetter(t, db) + testGetPath(t, db) +} + +func TestPebbleDBBatch(t *testing.T) { + db := testNewPebble(t) + testBatchPutAndDelete(t, db) +} + +func TestPebbleDBIterator(t *testing.T) { + db := testNewPebble(t) + testNextKeyIterator(t, db) + testSeekKeyValueIterator(t, db) +} + +func testPutGetter(t *testing.T, db Database) { + tests := testSetup() + for _, v := range tests { + err := db.Put([]byte(v.input), []byte(v.input)) + require.NoError(t, err) + + data, err := db.Get([]byte(v.input)) + require.NoError(t, err) + + require.Equal(t, data, []byte(v.expected)) + } +} + +func testHasGetter(t *testing.T, db Database) { + tests := testSetup() + + for _, v := range tests { + exists, err := db.Has([]byte(v.input)) + require.NoError(t, err) + require.True(t, exists) + } +} + +func testUpdateGetter(t *testing.T, db Database) { + tests := testSetup() + + for _, v := range tests { + err := db.Put([]byte(v.input), []byte("?")) + require.NoError(t, err) + + data, err := db.Get([]byte(v.input)) + require.NoError(t, err) + + require.Equal(t, data, []byte("?")) + } +} + +func testDelGetter(t *testing.T, db Database) { + tests := testSetup() + + for _, v := range tests { + err := db.Del([]byte(v.input)) + require.NoError(t, err) + + d, _ := db.Get([]byte(v.input)) + require.Empty(t, d) + } +} + +func testGetPath(t *testing.T, db Database) { + dir := db.Path() + fi, err := os.Stat(dir) + require.NoError(t, err) + require.True(t, fi.IsDir()) +} + +func testBatchPutAndDelete(t *testing.T, db Database) { + key := []byte("camel") + value := []byte("camel-value") + + batch := db.NewBatch() + err := batch.Put(key, value) + require.NoError(t, err) + + testFlushAndClose(t, batch, 1) + + deleteBatch := db.NewBatch() + err = deleteBatch.Del(key) + require.NoError(t, err) + + retrievedValue, err := db.Get(key) + 
require.NoError(t, err) + require.Equal(t, value, retrievedValue) + + testFlushAndClose(t, deleteBatch, 1) + + _, err = db.Get(key) + require.ErrorIs(t, err, pebble.ErrNotFound) +} + +func testFlushAndClose(t *testing.T, batch Batch, expectedSize int) { + t.Helper() + + err := batch.Flush() + require.NoError(t, err) + + size := batch.ValueSize() + require.Equal(t, expectedSize, size) + + batch.Close() + size = batch.ValueSize() + require.Equal(t, 0, size) +} + +func testIteratorSetup(t *testing.T, db Database) { + t.Helper() + batch := db.NewBatch() + + for i := 0; i < 5; i++ { + key := []byte(fmt.Sprintf("camel-%d", i)) + value := []byte(fmt.Sprintf("camel-value-%d", i)) + err := batch.Put(key, value) + require.NoError(t, err) + } + + err := batch.Flush() + require.NoError(t, err) +} + +func testNextKeyIterator(t *testing.T, db Database) { + testIteratorSetup(t, db) + + it := db.NewIterator() + defer it.Release() + + counter := 0 + for succ := it.First(); succ; succ = it.Next() { + require.NotNil(t, it.Key()) + require.NotNil(t, it.Value()) + counter++ + } + + // testIteratorSetup creates 5 entries + const expected = 5 + require.Equal(t, expected, counter) +} + +func testSeekKeyValueIterator(t *testing.T, db Database) { + testIteratorSetup(t, db) + kv := map[string]string{ + "camel-0": "camel-value-0", + "camel-1": "camel-value-1", + "camel-2": "camel-value-2", + "camel-3": "camel-value-3", + "camel-4": "camel-value-4", + } + + it := db.NewIterator() + defer it.Release() + + for succ := it.SeekGE([]byte("camel-")); succ; succ = it.Next() { + expectedValue, ok := kv[string(it.Key())] + require.True(t, ok) + + require.True(t, it.Valid()) + require.Equal(t, it.Value(), []byte(expectedValue)) + } +} diff --git a/internal/database/table.go b/internal/database/table.go new file mode 100644 index 0000000000..a6fd205612 --- /dev/null +++ b/internal/database/table.go @@ -0,0 +1,62 @@ +package database + +import ( + "bytes" +) + +type table struct { + db Database + prefix 
[]byte +} + +var _ Table = (*table)(nil) + +func NewTable(db Database, prefix string) Table { + return &table{ + db: db, + prefix: []byte(prefix), + } +} + +func (t *table) Path() string { + return string(t.prefix) +} + +func (t *table) Get(key []byte) ([]byte, error) { + tableItemKey := bytes.Join([][]byte{t.prefix, key}, nil) + return t.db.Get(tableItemKey) +} + +func (t *table) Has(key []byte) (bool, error) { + tableItemKey := bytes.Join([][]byte{t.prefix, key}, nil) + return t.db.Has(tableItemKey) +} + +func (t *table) Put(key, value []byte) error { + tableItemKey := bytes.Join([][]byte{t.prefix, key}, nil) + return t.db.Put(tableItemKey, value) +} + +func (t *table) Del(key []byte) error { + tableItemKey := bytes.Join([][]byte{t.prefix, key}, nil) + return t.db.Del(tableItemKey) +} + +func (t *table) Flush() error { + return t.db.Flush() +} + +func (t *table) Close() error { + return t.db.Close() +} + +func (t *table) NewBatch() Batch { + return &tableBatch{ + batch: t.db.NewBatch(), + prefix: t.prefix, + } +} + +func (t *table) NewIterator() Iterator { + return t.db.NewPrefixIterator(t.prefix) +} diff --git a/internal/database/table_batch.go b/internal/database/table_batch.go new file mode 100644 index 0000000000..3ebd43f774 --- /dev/null +++ b/internal/database/table_batch.go @@ -0,0 +1,38 @@ +package database + +import ( + "bytes" +) + +var _ Batch = (*tableBatch)(nil) + +type tableBatch struct { + batch Batch + prefix []byte +} + +func (tb *tableBatch) Put(key, value []byte) error { + tableItemKey := bytes.Join([][]byte{tb.prefix, key}, nil) + return tb.batch.Put(tableItemKey, value) +} + +func (tb *tableBatch) Del(key []byte) error { + tableItemKey := bytes.Join([][]byte{tb.prefix, key}, nil) + return tb.batch.Del(tableItemKey) +} + +func (tb *tableBatch) Flush() error { + return tb.batch.Flush() +} + +func (tb *tableBatch) ValueSize() int { + return tb.batch.ValueSize() +} + +func (tb *tableBatch) Reset() { + tb.batch.Reset() +} + +func (tb *tableBatch) 
Close() error { + return tb.batch.Close() +} diff --git a/lib/babe/verify_integration_test.go b/lib/babe/verify_integration_test.go index 49731c502f..ff875df585 100644 --- a/lib/babe/verify_integration_test.go +++ b/lib/babe/verify_integration_test.go @@ -11,11 +11,11 @@ import ( "testing" "time" - "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/digest" "github.com/ChainSafe/gossamer/dot/state" "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/crypto/sr25519" "github.com/ChainSafe/gossamer/pkg/scale" @@ -28,7 +28,7 @@ func TestVerificationManager_OnDisabled_InvalidIndex(t *testing.T) { genesis, genesisTrie, genesisHeader := newWestendDevGenesisWithTrieAndHeader(t) babeService := createTestService(t, ServiceConfig{}, genesis, genesisTrie, genesisHeader, nil) - db, err := chaindb.NewBadgerDB(&chaindb.Config{DataDir: t.TempDir(), InMemory: true}) + db, err := database.NewPebble(t.TempDir(), true) require.NoError(t, err) slotState := state.NewSlotState(db) @@ -52,7 +52,7 @@ func TestVerificationManager_OnDisabled_NewDigest(t *testing.T) { genesis, genesisTrie, genesisHeader := newWestendDevGenesisWithTrieAndHeader(t) babeService := createTestService(t, ServiceConfig{}, genesis, genesisTrie, genesisHeader, nil) - db, err := chaindb.NewBadgerDB(&chaindb.Config{DataDir: t.TempDir(), InMemory: true}) + db, err := database.NewPebble(t.TempDir(), true) require.NoError(t, err) slotState := state.NewSlotState(db) @@ -102,7 +102,7 @@ func TestVerificationManager_OnDisabled_DuplicateDigest(t *testing.T) { epochData, err := babeService.initiateEpoch(testEpochIndex) require.NoError(t, err) - db, err := chaindb.NewBadgerDB(&chaindb.Config{DataDir: 
t.TempDir(), InMemory: true}) + db, err := database.NewPebble(t.TempDir(), true) require.NoError(t, err) slotState := state.NewSlotState(db) vm := NewVerificationManager(babeService.blockState, slotState, babeService.epochState) @@ -136,7 +136,7 @@ func TestVerificationManager_VerifyBlock_Secondary(t *testing.T) { genesis, genesisTrie, genesisHeader := newWestendDevGenesisWithTrieAndHeader(t) babeService := createTestService(t, ServiceConfig{}, genesis, genesisTrie, genesisHeader, nil) - db, err := chaindb.NewBadgerDB(&chaindb.Config{DataDir: t.TempDir(), InMemory: true}) + db, err := database.NewPebble(t.TempDir(), true) require.NoError(t, err) slotState := state.NewSlotState(db) @@ -187,7 +187,7 @@ func TestVerificationManager_VerifyBlock_CurrentEpoch(t *testing.T) { genesis, genesisTrie, genesisHeader := newWestendDevGenesisWithTrieAndHeader(t) babeService := createTestService(t, ServiceConfig{}, genesis, genesisTrie, genesisHeader, nil) - db, err := chaindb.NewBadgerDB(&chaindb.Config{DataDir: t.TempDir(), InMemory: true}) + db, err := database.NewPebble(t.TempDir(), true) require.NoError(t, err) slotState := state.NewSlotState(db) @@ -225,7 +225,7 @@ func TestVerificationManager_VerifyBlock_FutureEpoch(t *testing.T) { genesis, genesisTrie, genesisHeader := newWestendDevGenesisWithTrieAndHeader(t) babeService := createTestService(t, ServiceConfig{}, genesis, genesisTrie, genesisHeader, babeConfig) - db, err := chaindb.NewBadgerDB(&chaindb.Config{DataDir: t.TempDir(), InMemory: true}) + db, err := database.NewPebble(t.TempDir(), true) require.NoError(t, err) slotState := state.NewSlotState(db) @@ -275,7 +275,7 @@ func TestVerificationManager_VerifyBlock_MultipleEpochs(t *testing.T) { genesis, genesisTrie, genesisHeader := newWestendDevGenesisWithTrieAndHeader(t) babeService := createTestService(t, ServiceConfig{}, genesis, genesisTrie, genesisHeader, babeConfig) - db, err := chaindb.NewBadgerDB(&chaindb.Config{DataDir: t.TempDir(), InMemory: true}) + db, err := 
database.NewPebble(t.TempDir(), true) require.NoError(t, err) slotState := state.NewSlotState(db) @@ -335,7 +335,7 @@ func TestVerificationManager_VerifyBlock_InvalidBlockOverThreshold(t *testing.T) genesis, genesisTrie, genesisHeader := newWestendDevGenesisWithTrieAndHeader(t) babeService := createTestService(t, ServiceConfig{}, genesis, genesisTrie, genesisHeader, babeConfig) - db, err := chaindb.NewBadgerDB(&chaindb.Config{DataDir: t.TempDir(), InMemory: true}) + db, err := database.NewPebble(t.TempDir(), true) require.NoError(t, err) slotState := state.NewSlotState(db) @@ -383,7 +383,7 @@ func TestVerificationManager_VerifyBlock_InvalidBlockAuthority(t *testing.T) { genesisBob, genesisTrieBob, genesisHeaderBob := newWestendDevGenesisWithTrieAndHeader(t) babeServiceBob := createTestService(t, ServiceConfig{}, genesisBob, genesisTrieBob, genesisHeaderBob, babeConfig) - db, err := chaindb.NewBadgerDB(&chaindb.Config{DataDir: t.TempDir(), InMemory: true}) + db, err := database.NewPebble(t.TempDir(), true) require.NoError(t, err) slotState := state.NewSlotState(db) @@ -437,7 +437,7 @@ func TestVerifyPrimarySlotWinner(t *testing.T) { digest, ok := babePreDigest.(types.BabePrimaryPreDigest) require.True(t, ok) - db, err := chaindb.NewBadgerDB(&chaindb.Config{DataDir: t.TempDir(), InMemory: true}) + db, err := database.NewPebble(t.TempDir(), true) require.NoError(t, err) slotState := state.NewSlotState(db) @@ -470,7 +470,7 @@ func TestVerifyAuthorshipRight(t *testing.T) { slot := getSlot(t, runtime, time.Now()) block := createTestBlockWithSlot(t, babeService, &genesisHeader, [][]byte{}, testEpochIndex, epochData, slot) - db, err := chaindb.NewBadgerDB(&chaindb.Config{DataDir: t.TempDir(), InMemory: true}) + db, err := database.NewPebble(t.TempDir(), true) require.NoError(t, err) slotState := state.NewSlotState(db) @@ -488,7 +488,7 @@ func TestVerifyAuthorshipRight_Equivocation(t *testing.T) { genesis, genesisTrie, genesisHeader := 
newWestendDevGenesisWithTrieAndHeader(t) babeService := createTestService(t, ServiceConfig{}, genesis, genesisTrie, genesisHeader, nil) - db, err := chaindb.NewBadgerDB(&chaindb.Config{DataDir: t.TempDir(), InMemory: true}) + db, err := database.NewPebble(t.TempDir(), true) require.NoError(t, err) slotState := state.NewSlotState(db) @@ -586,10 +586,7 @@ func TestVerifyForkBlocksWithRespectiveEpochData(t *testing.T) { err := stateService.Initialise(&genesis, &genesisHeader, &trie) require.NoError(t, err) - inMemoryDB, err := chaindb.NewBadgerDB(&chaindb.Config{ - InMemory: true, - DataDir: t.TempDir(), - }) + inMemoryDB, err := database.NewPebble(t.TempDir(), true) require.NoError(t, err) epochState, err := state.NewEpochStateFromGenesis(inMemoryDB, stateService.Block, epochBABEConfig) diff --git a/lib/grandpa/grandpa.go b/lib/grandpa/grandpa.go index 85d5b20ab2..c3d94605e6 100644 --- a/lib/grandpa/grandpa.go +++ b/lib/grandpa/grandpa.go @@ -13,7 +13,6 @@ import ( "sync/atomic" "time" - "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/state" "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" @@ -22,6 +21,7 @@ import ( "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/crypto/ed25519" "github.com/ChainSafe/gossamer/pkg/scale" + "github.com/cockroachdb/pebble" "github.com/libp2p/go-libp2p/core/peer" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -1171,7 +1171,7 @@ func (s *Service) handleCommitMessage(commitMessage *CommitMessage) error { err := verifyBlockHashAgainstBlockNumber(s.blockState, commitMessage.Vote.Hash, uint(commitMessage.Vote.Number)) if err != nil { - if errors.Is(err, chaindb.ErrKeyNotFound) { + if errors.Is(err, 
pebble.ErrNotFound) { s.tracker.addCommit(commitMessage) } diff --git a/lib/grandpa/message_handler.go b/lib/grandpa/message_handler.go index 4051fe3bda..3f7325bae5 100644 --- a/lib/grandpa/message_handler.go +++ b/lib/grandpa/message_handler.go @@ -8,13 +8,13 @@ import ( "errors" "fmt" - "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/network" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/blocktree" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/crypto/ed25519" "github.com/ChainSafe/gossamer/pkg/scale" + "github.com/cockroachdb/pebble" "github.com/libp2p/go-libp2p/core/peer" ) @@ -166,7 +166,7 @@ func (h *MessageHandler) handleCatchUpResponse(msg *CatchUpResponse) error { err := verifyBlockHashAgainstBlockNumber(h.blockState, msg.Hash, uint(msg.Number)) if err != nil { - if errors.Is(err, chaindb.ErrKeyNotFound) { + if errors.Is(err, pebble.ErrNotFound) { h.grandpa.tracker.addCatchUpResponse(msg) logger.Infof("we might not have synced to the given block %s yet: %s", msg.Hash, err) return nil @@ -285,7 +285,7 @@ func (h *MessageHandler) verifyPreVoteJustification(msg *CatchUpResponse) (commo for _, pvj := range msg.PreVoteJustification { err := verifyBlockHashAgainstBlockNumber(h.blockState, pvj.Vote.Hash, uint(pvj.Vote.Number)) if err != nil { - if errors.Is(err, chaindb.ErrKeyNotFound) { + if errors.Is(err, pebble.ErrNotFound) { h.grandpa.tracker.addCatchUpResponse(msg) logger.Infof("we might not have synced to the given block %s yet: %s", pvj.Vote.Hash, err) continue @@ -371,7 +371,7 @@ func (h *MessageHandler) verifyPreCommitJustification(msg *CatchUpResponse) erro err = verifyBlockHashAgainstBlockNumber(h.blockState, just.Vote.Hash, uint(just.Vote.Number)) if err != nil { - if errors.Is(err, chaindb.ErrKeyNotFound) { + if 
errors.Is(err, pebble.ErrNotFound) { h.grandpa.tracker.addCatchUpResponse(msg) logger.Infof("we might not have synced to the given block %s yet: %s", just.Vote.Hash, err) continue diff --git a/lib/grandpa/message_tracker_test.go b/lib/grandpa/message_tracker_test.go index 0bf2775fc4..d468fb1674 100644 --- a/lib/grandpa/message_tracker_test.go +++ b/lib/grandpa/message_tracker_test.go @@ -11,13 +11,13 @@ import ( "sync" "testing" - "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/telemetry" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/crypto/ed25519" "github.com/ChainSafe/gossamer/lib/keystore" "github.com/ChainSafe/gossamer/pkg/scale" + "github.com/cockroachdb/pebble" "github.com/golang/mock/gomock" "github.com/libp2p/go-libp2p/core/peer" @@ -65,7 +65,7 @@ func TestMessageTracker_handleTick_commitMessage(t *testing.T) { blockStateMock.EXPECT(). GetHeader(testHash). 
- Return(nil, chaindb.ErrKeyNotFound) + Return(nil, pebble.ErrNotFound) grandpaService := &Service{ telemetry: nil, diff --git a/lib/runtime/test_helpers.go b/lib/runtime/test_helpers.go index 45b17c3e8a..c6a5e836ac 100644 --- a/lib/runtime/test_helpers.go +++ b/lib/runtime/test_helpers.go @@ -15,8 +15,8 @@ import ( "testing" "time" - "github.com/ChainSafe/chaindb" "github.com/ChainSafe/gossamer/dot/types" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/lib/babe/inherents" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/crypto" @@ -31,13 +31,10 @@ import ( ) // NewInMemoryDB creates a new in-memory database -func NewInMemoryDB(t *testing.T) *chaindb.BadgerDB { +func NewInMemoryDB(t *testing.T) database.Database { testDatadirPath := t.TempDir() - db, err := chaindb.NewBadgerDB(&chaindb.Config{ - DataDir: testDatadirPath, - InMemory: true, - }) + db, err := database.NewPebble(testDatadirPath, true) require.NoError(t, err) t.Cleanup(func() { _ = db.Close() diff --git a/lib/runtime/wasmer/imports_test.go b/lib/runtime/wasmer/imports_test.go index 7f060d496c..60c24c416b 100644 --- a/lib/runtime/wasmer/imports_test.go +++ b/lib/runtime/wasmer/imports_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "github.com/ChainSafe/chaindb" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/common/types" "github.com/ChainSafe/gossamer/lib/crypto" @@ -1868,10 +1868,7 @@ func Test_ext_trie_blake2_256_verify_proof_version_1(t *testing.T) { tmp := t.TempDir() - memdb, err := chaindb.NewBadgerDB(&chaindb.Config{ - InMemory: true, - DataDir: tmp, - }) + memdb, err := database.NewPebble(tmp, true) require.NoError(t, err) otherTrie := trie.NewEmptyTrie() diff 
--git a/lib/runtime/wazero/imports_test.go b/lib/runtime/wazero/imports_test.go index c9d95f0a89..d671e1bd42 100644 --- a/lib/runtime/wazero/imports_test.go +++ b/lib/runtime/wazero/imports_test.go @@ -14,7 +14,7 @@ import ( "testing" "time" - "github.com/ChainSafe/chaindb" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/lib/common" "github.com/ChainSafe/gossamer/lib/common/types" "github.com/ChainSafe/gossamer/lib/crypto" @@ -589,11 +589,7 @@ func Test_ext_trie_blake2_256_ordered_root_version_1(t *testing.T) { func Test_ext_trie_blake2_256_verify_proof_version_1(t *testing.T) { tmp := t.TempDir() - - memdb, err := chaindb.NewBadgerDB(&chaindb.Config{ - InMemory: true, - DataDir: tmp, - }) + memdb, err := database.NewPebble(tmp, true) require.NoError(t, err) otherTrie := trie.NewEmptyTrie() diff --git a/lib/trie/database.go b/lib/trie/database.go index 7100992a99..7e5c07bef4 100644 --- a/lib/trie/database.go +++ b/lib/trie/database.go @@ -7,11 +7,10 @@ import ( "bytes" "fmt" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/internal/trie/codec" "github.com/ChainSafe/gossamer/internal/trie/node" "github.com/ChainSafe/gossamer/lib/common" - - "github.com/ChainSafe/chaindb" ) // DBGetter gets a value corresponding to the given key. @@ -26,7 +25,7 @@ type DBPutter interface { // NewBatcher creates a new database batch. type NewBatcher interface { - NewBatch() chaindb.Batch + NewBatch() database.Batch } // Load reconstructs the trie from the database from the given root hash. 
diff --git a/lib/trie/database_test.go b/lib/trie/database_test.go index 2300adb415..037218d237 100644 --- a/lib/trie/database_test.go +++ b/lib/trie/database_test.go @@ -6,19 +6,16 @@ package trie import ( "testing" - "github.com/ChainSafe/chaindb" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/lib/common" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func newTestDB(t *testing.T) chaindb.Database { - chainDBConfig := &chaindb.Config{ - InMemory: true, - } - database, err := chaindb.NewBadgerDB(chainDBConfig) +func newTestDB(t *testing.T) database.Table { + db, err := database.NewPebble("", true) require.NoError(t, err) - return chaindb.NewTable(database, "trie") + return database.NewTable(db, "trie") } func Test_Trie_Store_Load(t *testing.T) { diff --git a/lib/trie/proof/proof_test.go b/lib/trie/proof/proof_test.go index 8f813b4f61..40b68ed155 100644 --- a/lib/trie/proof/proof_test.go +++ b/lib/trie/proof/proof_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/ChainSafe/chaindb" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/lib/trie" "github.com/ChainSafe/gossamer/lib/trie/db" "github.com/stretchr/testify/require" @@ -35,16 +35,14 @@ func Test_Generate_Verify(t *testing.T) { rootHash, err := trie.Hash() require.NoError(t, err) - database, err := chaindb.NewBadgerDB(&chaindb.Config{ - InMemory: true, - }) + db, err := database.NewPebble("", true) require.NoError(t, err) - err = trie.WriteDirty(database) + err = trie.WriteDirty(db) require.NoError(t, err) for i, key := range keys { fullKeys := [][]byte{[]byte(key)} - proof, err := Generate(rootHash.ToBytes(), fullKeys, database) + proof, err := Generate(rootHash.ToBytes(), fullKeys, db) require.NoError(t, err) expectedValue := 
fmt.Sprintf("%x-%d", key, i) diff --git a/lib/trie/trie_endtoend_test.go b/lib/trie/trie_endtoend_test.go index 3ca189ebe5..eec8effff5 100644 --- a/lib/trie/trie_endtoend_test.go +++ b/lib/trie/trie_endtoend_test.go @@ -11,10 +11,10 @@ import ( "sync" "testing" - "github.com/ChainSafe/chaindb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/ChainSafe/gossamer/internal/database" "github.com/ChainSafe/gossamer/internal/trie/codec" "github.com/ChainSafe/gossamer/lib/common" ) @@ -267,11 +267,7 @@ func TestDeleteOddKeyLengths(t *testing.T) { } func TestTrieDiff(t *testing.T) { - cfg := &chaindb.Config{ - DataDir: t.TempDir(), - } - - db, err := chaindb.NewBadgerDB(cfg) + db, err := database.NewPebble(t.TempDir(), false) require.NoError(t, err) t.Cleanup(func() { @@ -279,7 +275,7 @@ func TestTrieDiff(t *testing.T) { require.NoError(t, err) }) - storageDB := chaindb.NewTable(db, "storage") + storageDB := database.NewTable(db, "storage") t.Cleanup(func() { err = storageDB.Close() require.NoError(t, err) diff --git a/lib/utils/utils.go b/lib/utils/utils.go index d94d3e4152..fd16e0fd31 100644 --- a/lib/utils/utils.go +++ b/lib/utils/utils.go @@ -14,8 +14,7 @@ import ( "strings" "testing" - "github.com/ChainSafe/chaindb" - "github.com/dgraph-io/badger/v2" + "github.com/ChainSafe/gossamer/internal/database" "github.com/stretchr/testify/require" ) @@ -23,11 +22,9 @@ import ( const DefaultDatabaseDir = "db" // SetupDatabase will return an instance of database based on basepath -func SetupDatabase(basepath string, inMemory bool) (*chaindb.BadgerDB, error) { - return chaindb.NewBadgerDB(&chaindb.Config{ - DataDir: filepath.Join(basepath, DefaultDatabaseDir), - InMemory: inMemory, - }) +func SetupDatabase(basepath string, inMemory bool) (database.Database, error) { + 
basepath = filepath.Join(basepath, DefaultDatabaseDir) + return database.NewPebble(basepath, inMemory) } // PathExists returns true if the named file or directory exists, otherwise false @@ -255,27 +252,11 @@ func GetProjectRootPath() (rootPath string, err error) { } // LoadChainDB load the db at the given path. -func LoadChainDB(basePath string) (*chaindb.BadgerDB, error) { - cfg := &chaindb.Config{ - DataDir: basePath, - } - +func LoadChainDB(basePath string) (database.Database, error) { // Open already existing DB - db, err := chaindb.NewBadgerDB(cfg) + db, err := database.NewPebble(basePath, false) if err != nil { - return nil, err - } - - return db, nil -} - -// LoadBadgerDB load the db at the given path. -func LoadBadgerDB(basePath string) (*badger.DB, error) { - opts := badger.DefaultOptions(basePath) - // Open already existing DB - db, err := badger.Open(opts) - if err != nil { - return nil, err + return nil, fmt.Errorf("opening database: %w", err) } return db, nil